/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * control flags for do_chunk_alloc's force field
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try to allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};

static int update_block_group(struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);

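/*
 * Illustrative sketch, NOT part of the original flow: how the two enums
 * above are meant to be used together.  A chunk allocation is normally
 * attempted with CHUNK_ALLOC_NO_FORCE, and a successful RESERVE_ALLOC
 * reservation (which updates bytes_may_use) is undone with RESERVE_FREE
 * if the allocation is later abandoned.  This helper is hypothetical and
 * exists only to demonstrate the pairing.
 */
static int __maybe_unused example_reserve_then_free(
				struct btrfs_trans_handle *trans,
				struct btrfs_root *extent_root,
				struct btrfs_block_group_cache *cache,
				u64 flags, u64 num_bytes)
{
	int ret;

	/* Make sure a suitable chunk exists, without forcing a new one. */
	ret = do_chunk_alloc(trans, extent_root, flags, CHUNK_ALLOC_NO_FORCE);
	if (ret < 0)
		return ret;

	/* Take the reservation; this updates the ENOSPC accounting. */
	ret = btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC);
	if (ret)
		return ret;

	/* ... pretend the allocation failed further down, so undo it ... */
	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE);
	return 0;
}
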
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                BUG_ON(ret); /* -ENOMEM */
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                BUG_ON(ret); /* -ENOMEM */

                while (nr--) {
                        cache->bytes_super += stripe_len;
                        ret = add_excluded_extent(root, logical[nr],
                                                  stripe_len);
                        BUG_ON(ret); /* -ENOMEM */
                }

                kfree(logical);
        }
        return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

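/*
 * Illustrative sketch, a hypothetical helper that is not called anywhere:
 * get_caching_control() above either returns NULL (the group is fully
 * cached, not caching, or being loaded the fast way) or returns the ctl
 * with its refcount elevated, so every successful get must be paired
 * with put_caching_control().
 */
static void __maybe_unused example_sample_progress(
				struct btrfs_block_group_cache *cache,
				u64 *progress)
{
	struct btrfs_caching_control *ctl;

	ctl = get_caching_control(cache);
	if (!ctl)
		return;

	*progress = ctl->progress; /* ctl stays alive while we hold a ref */
	put_caching_control(ctl);
}
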
/*
 * This is only called by the block group caching code.  Since we could
 * have freed extents, we need to check pinned_extents for any extents
 * that can't be used yet, because their free space will not be released
 * until the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}

static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = 0;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            btrfs_next_leaf(extent_root, path)) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->extent_commit_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        caching_ctl->work.func = caching_thread;

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it could happen in the case
         * where one thread starts to load the space cache info, and then
         * some other thread starts a transaction commit which tries to do an
         * allocation while the other thread is still loading the space cache
         * info.  The previous loop should have kept us from choosing this
         * block group, but if we've moved to the state where we will wait on
         * caching block groups we need to first check if we're doing a fast
         * load here, so we can wait for it to finish, otherwise we could end
         * up allocating from a block group whose cache gets evicted for one
         * reason or another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                        }
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wakeup any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->extent_commit_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

        return ret;
}

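/*
 * Illustrative sketch of a caller pattern for cache_block_group() above
 * (hypothetical helper, not part of the original flow): first try just
 * the fast space-cache load, then fall back to kicking off the full
 * caching thread if the group still isn't cached afterwards.
 */
static int __maybe_unused example_ensure_caching(
				struct btrfs_block_group_cache *cache)
{
	int ret;

	ret = cache_block_group(cache, 1);	/* fast load only */
	if (ret)
		return ret;

	if (!block_group_cache_done(cache))
		ret = cache_block_group(cache, 0);	/* queue the thread */
	return ret;
}
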
/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

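/*
 * Illustrative sketch, not used anywhere: both lookup helpers above
 * return the block group with an elevated reference count (taken inside
 * block_group_cache_tree_search()), so every successful lookup must be
 * paired with btrfs_put_block_group().
 */
static u64 __maybe_unused example_block_group_length(
				struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;
	u64 len = 0;

	cache = btrfs_lookup_block_group(info, bytenr);
	if (cache) {
		len = cache->key.offset;	/* size of the block group */
		btrfs_put_block_group(cache);
	}
	return len;
}
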
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

u64 btrfs_find_block_group(struct btrfs_root *root,
                           u64 search_start, u64 search_hint, int owner)
{
        struct btrfs_block_group_cache *cache;
        u64 used;
        u64 last = max(search_hint, search_start);
        u64 group_start = 0;
        int full_search = 0;
        int factor = 9;
        int wrapped = 0;
again:
        while (1) {
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                if (!cache)
                        break;

                spin_lock(&cache->lock);
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                if ((full_search || !cache->ro) &&
                    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
                        if (used + cache->pinned + cache->reserved <
                            div_factor(cache->key.offset, factor)) {
                                group_start = cache->key.objectid;
                                spin_unlock(&cache->lock);
                                btrfs_put_block_group(cache);
                                goto found;
                        }
                }
                spin_unlock(&cache->lock);
                btrfs_put_block_group(cache);
                cond_resched();
        }
        if (!wrapped) {
                last = search_start;
                wrapped = 1;
                goto again;
        }
        if (!full_search && factor < 10) {
                last = search_start;
                full_search = 1;
                factor = 10;
                goto again;
        }
found:
        return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

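/*
 * Illustrative sketch, a hypothetical helper: btrfs_lookup_extent()
 * above simply forwards the btrfs_search_slot() result, so 0 means the
 * extent item exists, a positive value means it does not, and a negative
 * value is an error.
 */
static int __maybe_unused example_extent_exists(struct btrfs_root *root,
						u64 start, u64 len)
{
	int ret = btrfs_lookup_extent(root, start, len);

	if (ret < 0)
		return ret;	/* propagate the error */
	return ret == 0;	/* 1 if found, 0 if not */
}
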
/*
 * helper function to look up the reference count and flags of an extent.
 *
 * the head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  the head
 * node may also store the extent flags to set.  This way you can check
 * to see what the reference count and extent flags would be once all of
 * the delayed refs have been processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 num_bytes, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = num_bytes;
        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }
again:
        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and
                         * try again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto again;
                }
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

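/*
 * Illustrative sketch, a hypothetical helper: reading the effective
 * reference count of an extent with btrfs_lookup_extent_info() above.
 * Passing a NULL trans searches the commit root, so no delayed refs are
 * folded in; with a real trans the delayed ref head is consulted too.
 */
static int __maybe_unused example_extent_is_shared(struct btrfs_root *root,
						   u64 bytenr, u64 num_bytes)
{
	u64 refs = 0;
	u64 flags = 0;
	int ret;

	ret = btrfs_lookup_extent_info(NULL, root, bytenr, num_bytes,
				       &refs, &flags);
	if (ret < 0)
		return ret;
	return refs > 1;
}
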
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually the full back ref is generic, and
 * can be used in all cases where the implicit back ref is used.  The
 * major shortcoming of the full back ref is its overhead.  Every time a
 * tree block gets COWed, we have to update the back refs entry for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction,
 * the only way to drop a reference to it is to COW it.  So we can detect
 * the event that a tree block loses its owner tree's reference and do
 * the back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs and add implicit back refs
 * for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block and increase lower level extents' reference counts.  The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer
 * in the new block and increase lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent, and the
 * key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required.  This information is stored in
 * the tree block info structure.
 */

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(trans, root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}

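/*
 * Illustrative sketch, not used anywhere: how the key for an implicit
 * data back ref is composed from the three fields described in the big
 * comment above.  This mirrors what lookup_extent_data_ref() and
 * insert_extent_data_ref() below do.
 */
static void __maybe_unused example_data_ref_key(struct btrfs_key *key,
						u64 bytenr, u64 root_objectid,
						u64 owner, u64 offset)
{
	key->objectid = bytenr;		/* first byte of the extent */
	key->type = BTRFS_EXTENT_DATA_REF_KEY;
	key->offset = hash_extent_data_ref(root_objectid, owner, offset);
}
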
1038 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1039                                      struct btrfs_extent_data_ref *ref)
1040 {
1041         return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1042                                     btrfs_extent_data_ref_objectid(leaf, ref),
1043                                     btrfs_extent_data_ref_offset(leaf, ref));
1044 }
1045
1046 static int match_extent_data_ref(struct extent_buffer *leaf,
1047                                  struct btrfs_extent_data_ref *ref,
1048                                  u64 root_objectid, u64 owner, u64 offset)
1049 {
1050         if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1051             btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1052             btrfs_extent_data_ref_offset(leaf, ref) != offset)
1053                 return 0;
1054         return 1;
1055 }
1056
1057 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1058                                            struct btrfs_root *root,
1059                                            struct btrfs_path *path,
1060                                            u64 bytenr, u64 parent,
1061                                            u64 root_objectid,
1062                                            u64 owner, u64 offset)
1063 {
1064         struct btrfs_key key;
1065         struct btrfs_extent_data_ref *ref;
1066         struct extent_buffer *leaf;
1067         u32 nritems;
1068         int ret;
1069         int recow;
1070         int err = -ENOENT;
1071
1072         key.objectid = bytenr;
1073         if (parent) {
1074                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1075                 key.offset = parent;
1076         } else {
1077                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1078                 key.offset = hash_extent_data_ref(root_objectid,
1079                                                   owner, offset);
1080         }
1081 again:
1082         recow = 0;
1083         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1084         if (ret < 0) {
1085                 err = ret;
1086                 goto fail;
1087         }
1088
1089         if (parent) {
1090                 if (!ret)
1091                         return 0;
1092 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1093                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1094                 btrfs_release_path(path);
1095                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1096                 if (ret < 0) {
1097                         err = ret;
1098                         goto fail;
1099                 }
1100                 if (!ret)
1101                         return 0;
1102 #endif
1103                 goto fail;
1104         }
1105
1106         leaf = path->nodes[0];
1107         nritems = btrfs_header_nritems(leaf);
1108         while (1) {
1109                 if (path->slots[0] >= nritems) {
1110                         ret = btrfs_next_leaf(root, path);
1111                         if (ret < 0)
1112                                 err = ret;
1113                         if (ret)
1114                                 goto fail;
1115
1116                         leaf = path->nodes[0];
1117                         nritems = btrfs_header_nritems(leaf);
1118                         recow = 1;
1119                 }
1120
1121                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1122                 if (key.objectid != bytenr ||
1123                     key.type != BTRFS_EXTENT_DATA_REF_KEY)
1124                         goto fail;
1125
1126                 ref = btrfs_item_ptr(leaf, path->slots[0],
1127                                      struct btrfs_extent_data_ref);
1128
1129                 if (match_extent_data_ref(leaf, ref, root_objectid,
1130                                           owner, offset)) {
1131                         if (recow) {
1132                                 btrfs_release_path(path);
1133                                 goto again;
1134                         }
1135                         err = 0;
1136                         break;
1137                 }
1138                 path->slots[0]++;
1139         }
1140 fail:
1141         return err;
1142 }
1143
1144 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1145                                            struct btrfs_root *root,
1146                                            struct btrfs_path *path,
1147                                            u64 bytenr, u64 parent,
1148                                            u64 root_objectid, u64 owner,
1149                                            u64 offset, int refs_to_add)
1150 {
1151         struct btrfs_key key;
1152         struct extent_buffer *leaf;
1153         u32 size;
1154         u32 num_refs;
1155         int ret;
1156
1157         key.objectid = bytenr;
1158         if (parent) {
1159                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1160                 key.offset = parent;
1161                 size = sizeof(struct btrfs_shared_data_ref);
1162         } else {
1163                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1164                 key.offset = hash_extent_data_ref(root_objectid,
1165                                                   owner, offset);
1166                 size = sizeof(struct btrfs_extent_data_ref);
1167         }
1168
1169         ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1170         if (ret && ret != -EEXIST)
1171                 goto fail;
1172
1173         leaf = path->nodes[0];
1174         if (parent) {
1175                 struct btrfs_shared_data_ref *ref;
1176                 ref = btrfs_item_ptr(leaf, path->slots[0],
1177                                      struct btrfs_shared_data_ref);
1178                 if (ret == 0) {
1179                         btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1180                 } else {
1181                         num_refs = btrfs_shared_data_ref_count(leaf, ref);
1182                         num_refs += refs_to_add;
1183                         btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1184                 }
1185         } else {
1186                 struct btrfs_extent_data_ref *ref;
1187                 while (ret == -EEXIST) {
1188                         ref = btrfs_item_ptr(leaf, path->slots[0],
1189                                              struct btrfs_extent_data_ref);
1190                         if (match_extent_data_ref(leaf, ref, root_objectid,
1191                                                   owner, offset))
1192                                 break;
1193                         btrfs_release_path(path);
1194                         key.offset++;
1195                         ret = btrfs_insert_empty_item(trans, root, path, &key,
1196                                                       size);
1197                         if (ret && ret != -EEXIST)
1198                                 goto fail;
1199
1200                         leaf = path->nodes[0];
1201                 }
1202                 ref = btrfs_item_ptr(leaf, path->slots[0],
1203                                      struct btrfs_extent_data_ref);
1204                 if (ret == 0) {
1205                         btrfs_set_extent_data_ref_root(leaf, ref,
1206                                                        root_objectid);
1207                         btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1208                         btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1209                         btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1210                 } else {
1211                         num_refs = btrfs_extent_data_ref_count(leaf, ref);
1212                         num_refs += refs_to_add;
1213                         btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1214                 }
1215         }
1216         btrfs_mark_buffer_dirty(leaf);
1217         ret = 0;
1218 fail:
1219         btrfs_release_path(path);
1220         return ret;
1221 }
1222
1223 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1224                                            struct btrfs_root *root,
1225                                            struct btrfs_path *path,
1226                                            int refs_to_drop)
1227 {
1228         struct btrfs_key key;
1229         struct btrfs_extent_data_ref *ref1 = NULL;
1230         struct btrfs_shared_data_ref *ref2 = NULL;
1231         struct extent_buffer *leaf;
1232         u32 num_refs = 0;
1233         int ret = 0;
1234
1235         leaf = path->nodes[0];
1236         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1237
1238         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1239                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1240                                       struct btrfs_extent_data_ref);
1241                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1242         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1243                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1244                                       struct btrfs_shared_data_ref);
1245                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1246 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1247         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1248                 struct btrfs_extent_ref_v0 *ref0;
1249                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1250                                       struct btrfs_extent_ref_v0);
1251                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1252 #endif
1253         } else {
1254                 BUG();
1255         }
1256
1257         BUG_ON(num_refs < refs_to_drop);
1258         num_refs -= refs_to_drop;
1259
1260         if (num_refs == 0) {
1261                 ret = btrfs_del_item(trans, root, path);
1262         } else {
1263                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1264                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1265                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1266                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1267 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1268                 else {
1269                         struct btrfs_extent_ref_v0 *ref0;
1270                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1271                                         struct btrfs_extent_ref_v0);
1272                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1273                 }
1274 #endif
1275                 btrfs_mark_buffer_dirty(leaf);
1276         }
1277         return ret;
1278 }
1279
1280 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1281                                           struct btrfs_path *path,
1282                                           struct btrfs_extent_inline_ref *iref)
1283 {
1284         struct btrfs_key key;
1285         struct extent_buffer *leaf;
1286         struct btrfs_extent_data_ref *ref1;
1287         struct btrfs_shared_data_ref *ref2;
1288         u32 num_refs = 0;
1289
1290         leaf = path->nodes[0];
1291         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1292         if (iref) {
1293                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1294                     BTRFS_EXTENT_DATA_REF_KEY) {
1295                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1296                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1297                 } else {
1298                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1299                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1300                 }
1301         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1302                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1303                                       struct btrfs_extent_data_ref);
1304                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1305         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1306                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1307                                       struct btrfs_shared_data_ref);
1308                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1309 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1310         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1311                 struct btrfs_extent_ref_v0 *ref0;
1312                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1313                                       struct btrfs_extent_ref_v0);
1314                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1315 #endif
1316         } else {
1317                 WARN_ON(1);
1318         }
1319         return num_refs;
1320 }
1321
1322 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1323                                           struct btrfs_root *root,
1324                                           struct btrfs_path *path,
1325                                           u64 bytenr, u64 parent,
1326                                           u64 root_objectid)
1327 {
1328         struct btrfs_key key;
1329         int ret;
1330
1331         key.objectid = bytenr;
1332         if (parent) {
1333                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1334                 key.offset = parent;
1335         } else {
1336                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1337                 key.offset = root_objectid;
1338         }
1339
1340         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1341         if (ret > 0)
1342                 ret = -ENOENT;
1343 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1344         if (ret == -ENOENT && parent) {
1345                 btrfs_release_path(path);
1346                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1347                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1348                 if (ret > 0)
1349                         ret = -ENOENT;
1350         }
1351 #endif
1352         return ret;
1353 }
1354
1355 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1356                                           struct btrfs_root *root,
1357                                           struct btrfs_path *path,
1358                                           u64 bytenr, u64 parent,
1359                                           u64 root_objectid)
1360 {
1361         struct btrfs_key key;
1362         int ret;
1363
1364         key.objectid = bytenr;
1365         if (parent) {
1366                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1367                 key.offset = parent;
1368         } else {
1369                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1370                 key.offset = root_objectid;
1371         }
1372
1373         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1374         btrfs_release_path(path);
1375         return ret;
1376 }
1377
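/*
 * pick the back ref key type for an extent.  owners below
 * BTRFS_FIRST_FREE_OBJECTID are tree roots, so the extent is a tree
 * block; anything else is a data extent.  a non-zero parent means the
 * ref is shared and keyed on the parent block instead of the root.
 */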
1378 static inline int extent_ref_type(u64 parent, u64 owner)
1379 {
1380         int type;
1381         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1382                 if (parent > 0)
1383                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1384                 else
1385                         type = BTRFS_TREE_BLOCK_REF_KEY;
1386         } else {
1387                 if (parent > 0)
1388                         type = BTRFS_SHARED_DATA_REF_KEY;
1389                 else
1390                         type = BTRFS_EXTENT_DATA_REF_KEY;
1391         }
1392         return type;
1393 }
1394
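/*
 * walk up the path to find the key that immediately follows the current
 * slot.  returns 0 and fills @key on success, or 1 if the path already
 * points at the last key in the tree.
 */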
1395 static int find_next_key(struct btrfs_path *path, int level,
1396                          struct btrfs_key *key)
1398 {
1399         for (; level < BTRFS_MAX_LEVEL; level++) {
1400                 if (!path->nodes[level])
1401                         break;
1402                 if (path->slots[level] + 1 >=
1403                     btrfs_header_nritems(path->nodes[level]))
1404                         continue;
1405                 if (level == 0)
1406                         btrfs_item_key_to_cpu(path->nodes[level], key,
1407                                               path->slots[level] + 1);
1408                 else
1409                         btrfs_node_key_to_cpu(path->nodes[level], key,
1410                                               path->slots[level] + 1);
1411                 return 0;
1412         }
1413         return 1;
1414 }
1415
1416 /*
1417  * look for inline back ref. if back ref is found, *ref_ret is set
1418  * to the address of inline back ref, and 0 is returned.
1419  *
1420  * if back ref isn't found, *ref_ret is set to the address where it
1421  * should be inserted, and -ENOENT is returned.
1422  *
1423  * if insert is true and there are too many inline back refs, the path
1424  * points to the extent item, and -EAGAIN is returned.
1425  *
1426  * NOTE: inline back refs are ordered in the same way that back ref
1427  *       items in the tree are ordered.
1428  */
1429 static noinline_for_stack
1430 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1431                                  struct btrfs_root *root,
1432                                  struct btrfs_path *path,
1433                                  struct btrfs_extent_inline_ref **ref_ret,
1434                                  u64 bytenr, u64 num_bytes,
1435                                  u64 parent, u64 root_objectid,
1436                                  u64 owner, u64 offset, int insert)
1437 {
1438         struct btrfs_key key;
1439         struct extent_buffer *leaf;
1440         struct btrfs_extent_item *ei;
1441         struct btrfs_extent_inline_ref *iref;
1442         u64 flags;
1443         u64 item_size;
1444         unsigned long ptr;
1445         unsigned long end;
1446         int extra_size;
1447         int type;
1448         int want;
1449         int ret;
1450         int err = 0;
1451
1452         key.objectid = bytenr;
1453         key.type = BTRFS_EXTENT_ITEM_KEY;
1454         key.offset = num_bytes;
1455
1456         want = extent_ref_type(parent, owner);
1457         if (insert) {
1458                 extra_size = btrfs_extent_inline_ref_size(want);
1459                 path->keep_locks = 1;
1460         } else
1461                 extra_size = -1;
1462         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1463         if (ret < 0) {
1464                 err = ret;
1465                 goto out;
1466         }
1467         if (ret && !insert) {
1468                 err = -ENOENT;
1469                 goto out;
1470         }
1471         BUG_ON(ret); /* Corruption */
1472
1473         leaf = path->nodes[0];
1474         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1475 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1476         if (item_size < sizeof(*ei)) {
1477                 if (!insert) {
1478                         err = -ENOENT;
1479                         goto out;
1480                 }
1481                 ret = convert_extent_item_v0(trans, root, path, owner,
1482                                              extra_size);
1483                 if (ret < 0) {
1484                         err = ret;
1485                         goto out;
1486                 }
1487                 leaf = path->nodes[0];
1488                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1489         }
1490 #endif
1491         BUG_ON(item_size < sizeof(*ei));
1492
1493         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1494         flags = btrfs_extent_flags(leaf, ei);
1495
1496         ptr = (unsigned long)(ei + 1);
1497         end = (unsigned long)ei + item_size;
1498
1499         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1500                 ptr += sizeof(struct btrfs_tree_block_info);
1501                 BUG_ON(ptr > end);
1502         } else {
1503                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1504         }
1505
1506         err = -ENOENT;
1507         while (1) {
1508                 if (ptr >= end) {
1509                         WARN_ON(ptr > end);
1510                         break;
1511                 }
1512                 iref = (struct btrfs_extent_inline_ref *)ptr;
1513                 type = btrfs_extent_inline_ref_type(leaf, iref);
1514                 if (want < type)
1515                         break;
1516                 if (want > type) {
1517                         ptr += btrfs_extent_inline_ref_size(type);
1518                         continue;
1519                 }
1520
1521                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1522                         struct btrfs_extent_data_ref *dref;
1523                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1524                         if (match_extent_data_ref(leaf, dref, root_objectid,
1525                                                   owner, offset)) {
1526                                 err = 0;
1527                                 break;
1528                         }
1529                         if (hash_extent_data_ref_item(leaf, dref) <
1530                             hash_extent_data_ref(root_objectid, owner, offset))
1531                                 break;
1532                 } else {
1533                         u64 ref_offset;
1534                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1535                         if (parent > 0) {
1536                                 if (parent == ref_offset) {
1537                                         err = 0;
1538                                         break;
1539                                 }
1540                                 if (ref_offset < parent)
1541                                         break;
1542                         } else {
1543                                 if (root_objectid == ref_offset) {
1544                                         err = 0;
1545                                         break;
1546                                 }
1547                                 if (ref_offset < root_objectid)
1548                                         break;
1549                         }
1550                 }
1551                 ptr += btrfs_extent_inline_ref_size(type);
1552         }
1553         if (err == -ENOENT && insert) {
1554                 if (item_size + extra_size >=
1555                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1556                         err = -EAGAIN;
1557                         goto out;
1558                 }
1559                 /*
1560                  * To add a new inline back ref, we have to make sure
1561                  * there is no corresponding back ref item.
1562                  * For simplicity, we just do not add a new inline back
1563                  * ref if there is any kind of item for this block.
1564                  */
1565                 if (find_next_key(path, 0, &key) == 0 &&
1566                     key.objectid == bytenr &&
1567                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1568                         err = -EAGAIN;
1569                         goto out;
1570                 }
1571         }
1572         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1573 out:
1574         if (insert) {
1575                 path->keep_locks = 0;
1576                 btrfs_unlock_up_safe(path, 1);
1577         }
1578         return err;
1579 }
1580
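/*
 * insert_inline_extent_backref() below handles all three return codes
 * of lookup_inline_extent_backref(): 0 means update the existing inline
 * ref in place, -ENOENT means insert a new inline ref at *ref_ret, and
 * -EAGAIN is passed up so the caller can fall back to a separate back
 * ref item.
 */
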
1581 /*
1582  * helper to add new inline back ref
1583  */
1584 static noinline_for_stack
1585 void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1586                                  struct btrfs_root *root,
1587                                  struct btrfs_path *path,
1588                                  struct btrfs_extent_inline_ref *iref,
1589                                  u64 parent, u64 root_objectid,
1590                                  u64 owner, u64 offset, int refs_to_add,
1591                                  struct btrfs_delayed_extent_op *extent_op)
1592 {
1593         struct extent_buffer *leaf;
1594         struct btrfs_extent_item *ei;
1595         unsigned long ptr;
1596         unsigned long end;
1597         unsigned long item_offset;
1598         u64 refs;
1599         int size;
1600         int type;
1601
1602         leaf = path->nodes[0];
1603         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1604         item_offset = (unsigned long)iref - (unsigned long)ei;
1605
1606         type = extent_ref_type(parent, owner);
1607         size = btrfs_extent_inline_ref_size(type);
1608
1609         btrfs_extend_item(trans, root, path, size);
1610
1611         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1612         refs = btrfs_extent_refs(leaf, ei);
1613         refs += refs_to_add;
1614         btrfs_set_extent_refs(leaf, ei, refs);
1615         if (extent_op)
1616                 __run_delayed_extent_op(extent_op, leaf, ei);
1617
1618         ptr = (unsigned long)ei + item_offset;
1619         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1620         if (ptr < end - size)
1621                 memmove_extent_buffer(leaf, ptr + size, ptr,
1622                                       end - size - ptr);
1623
1624         iref = (struct btrfs_extent_inline_ref *)ptr;
1625         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1626         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1627                 struct btrfs_extent_data_ref *dref;
1628                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1629                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1630                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1631                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1632                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1633         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1634                 struct btrfs_shared_data_ref *sref;
1635                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1636                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1637                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1638         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1639                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1640         } else {
1641                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1642         }
1643         btrfs_mark_buffer_dirty(leaf);
1644 }
1645
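/*
 * look up a back ref for an extent: try the inline form first and, if
 * that is not found, fall back to the keyed back ref item (a tree block
 * ref for metadata owners, a data ref otherwise).
 */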
1646 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1647                                  struct btrfs_root *root,
1648                                  struct btrfs_path *path,
1649                                  struct btrfs_extent_inline_ref **ref_ret,
1650                                  u64 bytenr, u64 num_bytes, u64 parent,
1651                                  u64 root_objectid, u64 owner, u64 offset)
1652 {
1653         int ret;
1654
1655         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1656                                            bytenr, num_bytes, parent,
1657                                            root_objectid, owner, offset, 0);
1658         if (ret != -ENOENT)
1659                 return ret;
1660
1661         btrfs_release_path(path);
1662         *ref_ret = NULL;
1663
1664         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1665                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1666                                             root_objectid);
1667         } else {
1668                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1669                                              root_objectid, owner, offset);
1670         }
1671         return ret;
1672 }
1673
1674 /*
1675  * helper to update/remove inline back ref
1676  */
1677 static noinline_for_stack
1678 void update_inline_extent_backref(struct btrfs_trans_handle *trans,
1679                                   struct btrfs_root *root,
1680                                   struct btrfs_path *path,
1681                                   struct btrfs_extent_inline_ref *iref,
1682                                   int refs_to_mod,
1683                                   struct btrfs_delayed_extent_op *extent_op)
1684 {
1685         struct extent_buffer *leaf;
1686         struct btrfs_extent_item *ei;
1687         struct btrfs_extent_data_ref *dref = NULL;
1688         struct btrfs_shared_data_ref *sref = NULL;
1689         unsigned long ptr;
1690         unsigned long end;
1691         u32 item_size;
1692         int size;
1693         int type;
1694         u64 refs;
1695
1696         leaf = path->nodes[0];
1697         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1698         refs = btrfs_extent_refs(leaf, ei);
1699         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1700         refs += refs_to_mod;
1701         btrfs_set_extent_refs(leaf, ei, refs);
1702         if (extent_op)
1703                 __run_delayed_extent_op(extent_op, leaf, ei);
1704
1705         type = btrfs_extent_inline_ref_type(leaf, iref);
1706
1707         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1708                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1709                 refs = btrfs_extent_data_ref_count(leaf, dref);
1710         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1711                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1712                 refs = btrfs_shared_data_ref_count(leaf, sref);
1713         } else {
1714                 refs = 1;
1715                 BUG_ON(refs_to_mod != -1);
1716         }
1717
1718         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1719         refs += refs_to_mod;
1720
1721         if (refs > 0) {
1722                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1723                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1724                 else
1725                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1726         } else {
1727                 size = btrfs_extent_inline_ref_size(type);
1728                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1729                 ptr = (unsigned long)iref;
1730                 end = (unsigned long)ei + item_size;
1731                 if (ptr + size < end)
1732                         memmove_extent_buffer(leaf, ptr, ptr + size,
1733                                               end - ptr - size);
1734                 item_size -= size;
1735                 btrfs_truncate_item(trans, root, path, item_size, 1);
1736         }
1737         btrfs_mark_buffer_dirty(leaf);
1738 }
1739
1740 static noinline_for_stack
1741 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1742                                  struct btrfs_root *root,
1743                                  struct btrfs_path *path,
1744                                  u64 bytenr, u64 num_bytes, u64 parent,
1745                                  u64 root_objectid, u64 owner,
1746                                  u64 offset, int refs_to_add,
1747                                  struct btrfs_delayed_extent_op *extent_op)
1748 {
1749         struct btrfs_extent_inline_ref *iref;
1750         int ret;
1751
1752         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1753                                            bytenr, num_bytes, parent,
1754                                            root_objectid, owner, offset, 1);
1755         if (ret == 0) {
1756                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1757                 update_inline_extent_backref(trans, root, path, iref,
1758                                              refs_to_add, extent_op);
1759         } else if (ret == -ENOENT) {
1760                 setup_inline_extent_backref(trans, root, path, iref, parent,
1761                                             root_objectid, owner, offset,
1762                                             refs_to_add, extent_op);
1763                 ret = 0;
1764         }
1765         return ret;
1766 }
1767
1768 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1769                                  struct btrfs_root *root,
1770                                  struct btrfs_path *path,
1771                                  u64 bytenr, u64 parent, u64 root_objectid,
1772                                  u64 owner, u64 offset, int refs_to_add)
1773 {
1774         int ret;
1775         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1776                 BUG_ON(refs_to_add != 1);
1777                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1778                                             parent, root_objectid);
1779         } else {
1780                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1781                                              parent, root_objectid,
1782                                              owner, offset, refs_to_add);
1783         }
1784         return ret;
1785 }
1786
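/*
 * drop refs_to_drop references from a back ref.  inline refs are
 * updated in place (and removed once the count hits zero); keyed data
 * ref items have their count decremented; keyed tree block ref items
 * are simply deleted, since they always stand for a single ref.
 */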
1787 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1788                                  struct btrfs_root *root,
1789                                  struct btrfs_path *path,
1790                                  struct btrfs_extent_inline_ref *iref,
1791                                  int refs_to_drop, int is_data)
1792 {
1793         int ret = 0;
1794
1795         BUG_ON(!is_data && refs_to_drop != 1);
1796         if (iref) {
1797                 update_inline_extent_backref(trans, root, path, iref,
1798                                              -refs_to_drop, NULL);
1799         } else if (is_data) {
1800                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1801         } else {
1802                 ret = btrfs_del_item(trans, root, path);
1803         }
1804         return ret;
1805 }
1806
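/*
 * blkdev_issue_discard() works in 512-byte sectors, hence the shift by
 * 9 of the byte offset and length.
 */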
1807 static int btrfs_issue_discard(struct block_device *bdev,
1808                                 u64 start, u64 len)
1809 {
1810         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1811 }
1812
1813 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1814                                 u64 num_bytes, u64 *actual_bytes)
1815 {
1816         int ret;
1817         u64 discarded_bytes = 0;
1818         struct btrfs_bio *bbio = NULL;
1819
1821         /* Tell the block device(s) that the sectors can be discarded */
1822         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1823                               bytenr, &num_bytes, &bbio, 0);
1824         /* Error condition is -ENOMEM */
1825         if (!ret) {
1826                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1827                 int i;
1828
1830                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1831                         if (!stripe->dev->can_discard)
1832                                 continue;
1833
1834                         ret = btrfs_issue_discard(stripe->dev->bdev,
1835                                                   stripe->physical,
1836                                                   stripe->length);
1837                         if (!ret)
1838                                 discarded_bytes += stripe->length;
1839                         else if (ret != -EOPNOTSUPP)
1840                                 break; /* logic error, -ENOMEM, or -EIO */
1841
1842                         /*
1843                          * If we get back EOPNOTSUPP for some reason,
1844                          * ignore the return value so we don't disturb
1845                          * callers of discard_extent.
1846                          */
1847                         ret = 0;
1848                 }
1849                 kfree(bbio);
1850         }
1851
1852         if (actual_bytes)
1853                 *actual_bytes = discarded_bytes;
1854
1856         if (ret == -EOPNOTSUPP)
1857                 ret = 0;
1858         return ret;
1859 }
1860
1861 /* Can return -ENOMEM */
1862 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1863                          struct btrfs_root *root,
1864                          u64 bytenr, u64 num_bytes, u64 parent,
1865                          u64 root_objectid, u64 owner, u64 offset, int for_cow)
1866 {
1867         int ret;
1868         struct btrfs_fs_info *fs_info = root->fs_info;
1869
1870         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1871                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1872
1873         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1874                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1875                                         num_bytes,
1876                                         parent, root_objectid, (int)owner,
1877                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1878         } else {
1879                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1880                                         num_bytes,
1881                                         parent, root_objectid, owner, offset,
1882                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1883         }
1884         return ret;
1885 }
1886
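/*
 * add refs_to_add references to an existing extent.  the inline back
 * ref is tried first; if the extent item has no room left (-EAGAIN),
 * the ref count on the extent item is bumped directly and a separate
 * keyed back ref item is inserted instead.
 */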
1887 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1888                                   struct btrfs_root *root,
1889                                   u64 bytenr, u64 num_bytes,
1890                                   u64 parent, u64 root_objectid,
1891                                   u64 owner, u64 offset, int refs_to_add,
1892                                   struct btrfs_delayed_extent_op *extent_op)
1893 {
1894         struct btrfs_path *path;
1895         struct extent_buffer *leaf;
1896         struct btrfs_extent_item *item;
1897         u64 refs;
1898         int ret;
1899         int err = 0;
1900
1901         path = btrfs_alloc_path();
1902         if (!path)
1903                 return -ENOMEM;
1904
1905         path->reada = 1;
1906         path->leave_spinning = 1;
1907         /* this will set up the path even if it fails to insert the back ref */
1908         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1909                                            path, bytenr, num_bytes, parent,
1910                                            root_objectid, owner, offset,
1911                                            refs_to_add, extent_op);
1912         if (ret == 0)
1913                 goto out;
1914
1915         if (ret != -EAGAIN) {
1916                 err = ret;
1917                 goto out;
1918         }
1919
1920         leaf = path->nodes[0];
1921         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1922         refs = btrfs_extent_refs(leaf, item);
1923         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1924         if (extent_op)
1925                 __run_delayed_extent_op(extent_op, leaf, item);
1926
1927         btrfs_mark_buffer_dirty(leaf);
1928         btrfs_release_path(path);
1929
1930         path->reada = 1;
1931         path->leave_spinning = 1;
1932
1933         /* now insert the actual backref */
1934         ret = insert_extent_backref(trans, root->fs_info->extent_root,
1935                                     path, bytenr, parent, root_objectid,
1936                                     owner, offset, refs_to_add);
1937         if (ret)
1938                 btrfs_abort_transaction(trans, root, ret);
1939 out:
1940         btrfs_free_path(path);
1941         return err;
1942 }
1943
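/*
 * apply a single delayed data ref: allocate the reserved file extent
 * for the first ref on a freshly allocated extent, or add/drop a ref
 * on an existing one.
 */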
1944 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1945                                 struct btrfs_root *root,
1946                                 struct btrfs_delayed_ref_node *node,
1947                                 struct btrfs_delayed_extent_op *extent_op,
1948                                 int insert_reserved)
1949 {
1950         int ret = 0;
1951         struct btrfs_delayed_data_ref *ref;
1952         struct btrfs_key ins;
1953         u64 parent = 0;
1954         u64 ref_root = 0;
1955         u64 flags = 0;
1956
1957         ins.objectid = node->bytenr;
1958         ins.offset = node->num_bytes;
1959         ins.type = BTRFS_EXTENT_ITEM_KEY;
1960
1961         ref = btrfs_delayed_node_to_data_ref(node);
1962         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1963                 parent = ref->parent;
1964         else
1965                 ref_root = ref->root;
1966
1967         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1968                 if (extent_op) {
1969                         BUG_ON(extent_op->update_key);
1970                         flags |= extent_op->flags_to_set;
1971                 }
1972                 ret = alloc_reserved_file_extent(trans, root,
1973                                                  parent, ref_root, flags,
1974                                                  ref->objectid, ref->offset,
1975                                                  &ins, node->ref_mod);
1976         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1977                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1978                                              node->num_bytes, parent,
1979                                              ref_root, ref->objectid,
1980                                              ref->offset, node->ref_mod,
1981                                              extent_op);
1982         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1983                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1984                                           node->num_bytes, parent,
1985                                           ref_root, ref->objectid,
1986                                           ref->offset, node->ref_mod,
1987                                           extent_op);
1988         } else {
1989                 BUG();
1990         }
1991         return ret;
1992 }
1993
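/*
 * fold a delayed extent op into the extent item: set the requested
 * flags and, for tree blocks, store the first key in the
 * btrfs_tree_block_info that follows the item.
 */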
1994 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1995                                     struct extent_buffer *leaf,
1996                                     struct btrfs_extent_item *ei)
1997 {
1998         u64 flags = btrfs_extent_flags(leaf, ei);
1999         if (extent_op->update_flags) {
2000                 flags |= extent_op->flags_to_set;
2001                 btrfs_set_extent_flags(leaf, ei, flags);
2002         }
2003
2004         if (extent_op->update_key) {
2005                 struct btrfs_tree_block_info *bi;
2006                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2007                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2008                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2009         }
2010 }
2011
2012 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2013                                  struct btrfs_root *root,
2014                                  struct btrfs_delayed_ref_node *node,
2015                                  struct btrfs_delayed_extent_op *extent_op)
2016 {
2017         struct btrfs_key key;
2018         struct btrfs_path *path;
2019         struct btrfs_extent_item *ei;
2020         struct extent_buffer *leaf;
2021         u32 item_size;
2022         int ret;
2023         int err = 0;
2024
2025         if (trans->aborted)
2026                 return 0;
2027
2028         path = btrfs_alloc_path();
2029         if (!path)
2030                 return -ENOMEM;
2031
2032         key.objectid = node->bytenr;
2033         key.type = BTRFS_EXTENT_ITEM_KEY;
2034         key.offset = node->num_bytes;
2035
2036         path->reada = 1;
2037         path->leave_spinning = 1;
2038         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2039                                 path, 0, 1);
2040         if (ret < 0) {
2041                 err = ret;
2042                 goto out;
2043         }
2044         if (ret > 0) {
2045                 err = -EIO;
2046                 goto out;
2047         }
2048
2049         leaf = path->nodes[0];
2050         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2051 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2052         if (item_size < sizeof(*ei)) {
2053                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2054                                              path, (u64)-1, 0);
2055                 if (ret < 0) {
2056                         err = ret;
2057                         goto out;
2058                 }
2059                 leaf = path->nodes[0];
2060                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2061         }
2062 #endif
2063         BUG_ON(item_size < sizeof(*ei));
2064         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2065         __run_delayed_extent_op(extent_op, leaf, ei);
2066
2067         btrfs_mark_buffer_dirty(leaf);
2068 out:
2069         btrfs_free_path(path);
2070         return err;
2071 }
2072
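/*
 * apply a single delayed tree block ref; the tree block counterpart of
 * run_delayed_data_ref() above, except that tree refs always carry a
 * ref_mod of exactly 1.
 */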
2073 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2074                                 struct btrfs_root *root,
2075                                 struct btrfs_delayed_ref_node *node,
2076                                 struct btrfs_delayed_extent_op *extent_op,
2077                                 int insert_reserved)
2078 {
2079         int ret = 0;
2080         struct btrfs_delayed_tree_ref *ref;
2081         struct btrfs_key ins;
2082         u64 parent = 0;
2083         u64 ref_root = 0;
2084
2085         ins.objectid = node->bytenr;
2086         ins.offset = node->num_bytes;
2087         ins.type = BTRFS_EXTENT_ITEM_KEY;
2088
2089         ref = btrfs_delayed_node_to_tree_ref(node);
2090         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2091                 parent = ref->parent;
2092         else
2093                 ref_root = ref->root;
2094
2095         BUG_ON(node->ref_mod != 1);
2096         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2097                 BUG_ON(!extent_op || !extent_op->update_flags ||
2098                        !extent_op->update_key);
2099                 ret = alloc_reserved_tree_block(trans, root,
2100                                                 parent, ref_root,
2101                                                 extent_op->flags_to_set,
2102                                                 &extent_op->key,
2103                                                 ref->level, &ins);
2104         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2105                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2106                                              node->num_bytes, parent, ref_root,
2107                                              ref->level, 0, 1, extent_op);
2108         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2109                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2110                                           node->num_bytes, parent, ref_root,
2111                                           ref->level, 0, 1, extent_op);
2112         } else {
2113                 BUG();
2114         }
2115         return ret;
2116 }
2117
2118 /* helper function to actually process a single delayed ref entry */
2119 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2120                                struct btrfs_root *root,
2121                                struct btrfs_delayed_ref_node *node,
2122                                struct btrfs_delayed_extent_op *extent_op,
2123                                int insert_reserved)
2124 {
2125         int ret = 0;
2126
2127         if (trans->aborted)
2128                 return 0;
2129
2130         if (btrfs_delayed_ref_is_head(node)) {
2131                 struct btrfs_delayed_ref_head *head;
2132                 /*
2133                  * we've hit the end of the chain and we were supposed
2134                  * to insert this extent into the tree.  But it got
2135                  * deleted before we ever needed to insert it, so all
2136                  * we have to do is clean up the accounting.
2137                  */
2138                 BUG_ON(extent_op);
2139                 head = btrfs_delayed_node_to_head(node);
2140                 if (insert_reserved) {
2141                         btrfs_pin_extent(root, node->bytenr,
2142                                          node->num_bytes, 1);
2143                         if (head->is_data) {
2144                                 ret = btrfs_del_csums(trans, root,
2145                                                       node->bytenr,
2146                                                       node->num_bytes);
2147                         }
2148                 }
2149                 return ret;
2150         }
2151
2152         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2153             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2154                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2155                                            insert_reserved);
2156         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2157                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2158                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2159                                            insert_reserved);
2160         else
2161                 BUG();
2162         return ret;
2163 }
2164
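/*
 * return the next delayed ref to run for this head, preferring
 * BTRFS_ADD_DELAYED_REF entries over drops, or NULL when none are left.
 */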
2165 static noinline struct btrfs_delayed_ref_node *
2166 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2167 {
2168         struct rb_node *node;
2169         struct btrfs_delayed_ref_node *ref;
2170         int action = BTRFS_ADD_DELAYED_REF;
2171 again:
2172         /*
2173          * select delayed refs of type BTRFS_ADD_DELAYED_REF first.
2174          * this prevents the ref count from going down to zero while
2175          * there are still pending delayed refs.
2176          */
2177         node = rb_prev(&head->node.rb_node);
2178         while (1) {
2179                 if (!node)
2180                         break;
2181                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2182                                 rb_node);
2183                 if (ref->bytenr != head->node.bytenr)
2184                         break;
2185                 if (ref->action == action)
2186                         return ref;
2187                 node = rb_prev(node);
2188         }
2189         if (action == BTRFS_ADD_DELAYED_REF) {
2190                 action = BTRFS_DROP_DELAYED_REF;
2191                 goto again;
2192         }
2193         return NULL;
2194 }
2195
2196 /*
2197  * Returns the number of refs processed on success, or a negative error
2198  * such as -ENOMEM or -EIO on failure; the caller then aborts the transaction.
2199  */
2200 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2201                                        struct btrfs_root *root,
2202                                        struct list_head *cluster)
2203 {
2204         struct btrfs_delayed_ref_root *delayed_refs;
2205         struct btrfs_delayed_ref_node *ref;
2206         struct btrfs_delayed_ref_head *locked_ref = NULL;
2207         struct btrfs_delayed_extent_op *extent_op;
2208         struct btrfs_fs_info *fs_info = root->fs_info;
2209         int ret;
2210         int count = 0;
2211         int must_insert_reserved = 0;
2212
2213         delayed_refs = &trans->transaction->delayed_refs;
2214         while (1) {
2215                 if (!locked_ref) {
2216                         /* pick a new head ref from the cluster list */
2217                         if (list_empty(cluster))
2218                                 break;
2219
2220                         locked_ref = list_entry(cluster->next,
2221                                      struct btrfs_delayed_ref_head, cluster);
2222
2223                         /* grab the lock that says we are going to process
2224                          * all the refs for this head */
2225                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2226
2227                         /*
2228                          * we may have dropped the spin lock to get the head
2229                          * mutex lock, and that might have given someone else
2230                          * time to free the head.  If that's true, it has been
2231                          * removed from our list and we can move on.
2232                          */
2233                         if (ret == -EAGAIN) {
2234                                 locked_ref = NULL;
2235                                 count++;
2236                                 continue;
2237                         }
2238                 }
2239
2240                 /*
2241                  * We need to try and merge add/drops of the same ref since we
2242                  * can run into issues with relocate dropping the implicit ref
2243                  * and then it being added back again before the drop can
2244                  * finish.  If we merged anything we need to re-loop so we can
2245                  * get a good ref.
2246                  */
2247                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2248                                          locked_ref);
2249
2250                 /*
2251                  * locked_ref is the head node, so we have to go one
2252                  * node back for any delayed ref updates
2253                  */
2254                 ref = select_delayed_ref(locked_ref);
2255
2256                 if (ref && ref->seq &&
2257                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2258                         /*
2259                          * there are still refs with lower seq numbers in the
2260                          * process of being added. Don't run this ref yet.
2261                          */
2262                         list_del_init(&locked_ref->cluster);
2263                         btrfs_delayed_ref_unlock(locked_ref);
2264                         locked_ref = NULL;
2265                         delayed_refs->num_heads_ready++;
2266                         spin_unlock(&delayed_refs->lock);
2267                         cond_resched();
2268                         spin_lock(&delayed_refs->lock);
2269                         continue;
2270                 }
2271
2272                 /*
2273                  * record the must insert reserved flag before we
2274                  * drop the spin lock.
2275                  */
2276                 must_insert_reserved = locked_ref->must_insert_reserved;
2277                 locked_ref->must_insert_reserved = 0;
2278
2279                 extent_op = locked_ref->extent_op;
2280                 locked_ref->extent_op = NULL;
2281
2282                 if (!ref) {
2283                         /* All delayed refs have been processed; go ahead
2284                          * and send the head node to run_one_delayed_ref,
2285                          * so that any accounting fixes can happen.
2286                          */
2287                         ref = &locked_ref->node;
2288
2289                         if (extent_op && must_insert_reserved) {
2290                                 btrfs_free_delayed_extent_op(extent_op);
2291                                 extent_op = NULL;
2292                         }
2293
2294                         if (extent_op) {
2295                                 spin_unlock(&delayed_refs->lock);
2296
2297                                 ret = run_delayed_extent_op(trans, root,
2298                                                             ref, extent_op);
2299                                 btrfs_free_delayed_extent_op(extent_op);
2300
2301                                 if (ret) {
2302                                         printk(KERN_DEBUG
2303                                                "btrfs: run_delayed_extent_op "
2304                                                "returned %d\n", ret);
2305                                         spin_lock(&delayed_refs->lock);
2306                                         btrfs_delayed_ref_unlock(locked_ref);
2307                                         return ret;
2308                                 }
2309
2310                                 goto next;
2311                         }
2312                 }
2313
2314                 ref->in_tree = 0;
2315                 rb_erase(&ref->rb_node, &delayed_refs->root);
2316                 delayed_refs->num_entries--;
2317                 if (!btrfs_delayed_ref_is_head(ref)) {
2318                         /*
2319                          * when we play the delayed ref, also correct the
2320                          * ref_mod on head
2321                          */
2322                         switch (ref->action) {
2323                         case BTRFS_ADD_DELAYED_REF:
2324                         case BTRFS_ADD_DELAYED_EXTENT:
2325                                 locked_ref->node.ref_mod -= ref->ref_mod;
2326                                 break;
2327                         case BTRFS_DROP_DELAYED_REF:
2328                                 locked_ref->node.ref_mod += ref->ref_mod;
2329                                 break;
2330                         default:
2331                                 WARN_ON(1);
2332                         }
2333                 }
2334                 spin_unlock(&delayed_refs->lock);
2335
2336                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2337                                           must_insert_reserved);
2338
2339                 btrfs_free_delayed_extent_op(extent_op);
2340                 if (ret) {
2341                         btrfs_delayed_ref_unlock(locked_ref);
2342                         btrfs_put_delayed_ref(ref);
2343                         printk(KERN_DEBUG
2344                                "btrfs: run_one_delayed_ref returned %d\n", ret);
2345                         spin_lock(&delayed_refs->lock);
2346                         return ret;
2347                 }
2348
2349                 /*
2350                  * If this node is a head, that means all the refs in this head
2351                  * have been dealt with, and we will pick the next head to deal
2352                  * with, so we must unlock the head and drop it from the cluster
2353                  * list before we release it.
2354                  */
2355                 if (btrfs_delayed_ref_is_head(ref)) {
2356                         list_del_init(&locked_ref->cluster);
2357                         btrfs_delayed_ref_unlock(locked_ref);
2358                         locked_ref = NULL;
2359                 }
2360                 btrfs_put_delayed_ref(ref);
2361                 count++;
2362 next:
2363                 cond_resched();
2364                 spin_lock(&delayed_refs->lock);
2365         }
2366         return count;
2367 }
2368
2369 #ifdef SCRAMBLE_DELAYED_REFS
2370 /*
2371  * Normally delayed refs get processed in ascending bytenr order. This
2372  * correlates in most cases to the order added. To expose dependencies on this
2373  * order, we start to process the tree in the middle instead of the beginning
2374  */
2375 static u64 find_middle(struct rb_root *root)
2376 {
2377         struct rb_node *n = root->rb_node;
2378         struct btrfs_delayed_ref_node *entry;
2379         int alt = 1;
2380         u64 middle;
2381         u64 first = 0, last = 0;
2382
2383         n = rb_first(root);
2384         if (n) {
2385                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2386                 first = entry->bytenr;
2387         }
2388         n = rb_last(root);
2389         if (n) {
2390                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2391                 last = entry->bytenr;
2392         }
2393         n = root->rb_node;
2394
2395         while (n) {
2396                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2397                 WARN_ON(!entry->in_tree);
2398
2399                 middle = entry->bytenr;
2400
2401                 if (alt)
2402                         n = n->rb_left;
2403                 else
2404                         n = n->rb_right;
2405
2406                 alt = 1 - alt;
2407         }
2408         return middle;
2409 }
2410 #endif
2411
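/*
 * drain the per-transaction qgroup ref list, accounting each queued
 * update, then drop the transaction's tree mod log sequence element.
 */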
2412 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
2413                                          struct btrfs_fs_info *fs_info)
2414 {
2415         struct qgroup_update *qgroup_update;
2416         int ret = 0;
2417
2418         if (list_empty(&trans->qgroup_ref_list) !=
2419             !trans->delayed_ref_elem.seq) {
2420                 /* list without seq or seq without list */
2421                 printk(KERN_ERR "btrfs: qgroup accounting update error, list is%s empty, seq is %llu\n",
2422                         list_empty(&trans->qgroup_ref_list) ? "" : " not",
2423                         trans->delayed_ref_elem.seq);
2424                 BUG();
2425         }
2426
2427         if (!trans->delayed_ref_elem.seq)
2428                 return 0;
2429
2430         while (!list_empty(&trans->qgroup_ref_list)) {
2431                 qgroup_update = list_first_entry(&trans->qgroup_ref_list,
2432                                                  struct qgroup_update, list);
2433                 list_del(&qgroup_update->list);
2434                 if (!ret)
2435                         ret = btrfs_qgroup_account_ref(
2436                                         trans, fs_info, qgroup_update->node,
2437                                         qgroup_update->extent_op);
2438                 kfree(qgroup_update);
2439         }
2440
2441         btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
2442
2443         return ret;
2444 }
2445
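/*
 * return 1 once the global ref sequence has moved out of the window
 * [seq, seq + count), i.e. roughly @count refs have been processed
 * since the caller sampled @seq.
 */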
2446 static int refs_newer(struct btrfs_delayed_ref_root *delayed_refs, int seq,
2447                       int count)
2448 {
2449         int val = atomic_read(&delayed_refs->ref_seq);
2450
2451         if (val < seq || val >= seq + count)
2452                 return 1;
2453         return 0;
2454 }
2455
2456 /*
2457  * this starts processing the delayed reference count updates and
2458  * extent insertions we have queued up so far.  count can be
2459  * 0, which means to process everything in the tree at the start
2460  * of the run (but not newly added entries), or it can be some target
2461  * number you'd like to process.
2462  *
2463  * Returns 0 on success or if called with an aborted transaction
2464  * Returns <0 on error and aborts the transaction
2465  */
2466 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2467                            struct btrfs_root *root, unsigned long count)
2468 {
2469         struct rb_node *node;
2470         struct btrfs_delayed_ref_root *delayed_refs;
2471         struct btrfs_delayed_ref_node *ref;
2472         struct list_head cluster;
2473         int ret;
2474         u64 delayed_start;
2475         int run_all = count == (unsigned long)-1;
2476         int run_most = 0;
2477         int loops;
2478
2479         /* We'll clean this up in btrfs_cleanup_transaction */
2480         if (trans->aborted)
2481                 return 0;
2482
2483         if (root == root->fs_info->extent_root)
2484                 root = root->fs_info->tree_root;
2485
2486         btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
2487
2488         delayed_refs = &trans->transaction->delayed_refs;
2489         INIT_LIST_HEAD(&cluster);
2490         if (count == 0) {
2491                 count = delayed_refs->num_entries * 2;
2492                 run_most = 1;
2493         }
2494
2495         if (!run_all && !run_most) {
2496                 int old;
2497                 int seq = atomic_read(&delayed_refs->ref_seq);
2498
2499 progress:
2500                 old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
2501                 if (old) {
2502                         DEFINE_WAIT(__wait);
2503                         if (delayed_refs->num_entries < 16348)
2504                                 return 0;
2505
2506                         prepare_to_wait(&delayed_refs->wait, &__wait,
2507                                         TASK_UNINTERRUPTIBLE);
2508
2509                         old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
2510                         if (old) {
2511                                 schedule();
2512                                 finish_wait(&delayed_refs->wait, &__wait);
2513
2514                                 if (!refs_newer(delayed_refs, seq, 256))
2515                                         goto progress;
2516                                 else
2517                                         return 0;
2518                         } else {
2519                                 finish_wait(&delayed_refs->wait, &__wait);
2520                                 goto again;
2521                         }
2522                 }
2523
2524         } else {
2525                 atomic_inc(&delayed_refs->procs_running_refs);
2526         }
2527
2528 again:
2529         loops = 0;
2530         spin_lock(&delayed_refs->lock);
2531
2532 #ifdef SCRAMBLE_DELAYED_REFS
2533         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2534 #endif
2535
2536         while (1) {
2537                 if (!(run_all || run_most) &&
2538                     delayed_refs->num_heads_ready < 64)
2539                         break;
2540
2541                 /*
2542                  * go find something we can process in the rbtree.  We start at
2543                  * the beginning of the tree, and then build a cluster
2544                  * of refs to process starting at the first one we are able to
2545                  * lock
2546                  */
2547                 delayed_start = delayed_refs->run_delayed_start;
2548                 ret = btrfs_find_ref_cluster(trans, &cluster,
2549                                              delayed_refs->run_delayed_start);
2550                 if (ret)
2551                         break;
2552
2553                 ret = run_clustered_refs(trans, root, &cluster);
2554                 if (ret < 0) {
2555                         btrfs_release_ref_cluster(&cluster);
2556                         spin_unlock(&delayed_refs->lock);
2557                         btrfs_abort_transaction(trans, root, ret);
2558                         atomic_dec(&delayed_refs->procs_running_refs);
2559                         return ret;
2560                 }
2561
2562                 atomic_add(ret, &delayed_refs->ref_seq);
2563
2564                 count -= min_t(unsigned long, ret, count);
2565
2566                 if (count == 0)
2567                         break;
2568
2569                 if (delayed_start >= delayed_refs->run_delayed_start) {
2570                         if (loops == 0) {
2571                                 /*
2572                                  * btrfs_find_ref_cluster looped. let's do one
2573                                  * more cycle. if we don't run any delayed refs
2574                                  * during that cycle (because all of them are
2575                                  * blocked), bail out.
2576                                  */
2577                                 loops = 1;
2578                         } else {
2579                                 /*
2580                                  * no runnable refs left, stop trying
2581                                  */
2582                                 BUG_ON(run_all);
2583                                 break;
2584                         }
2585                 }
2586                 if (ret) {
2587                         /* refs were run, let's reset staleness detection */
2588                         loops = 0;
2589                 }
2590         }
2591
2592         if (run_all) {
2593                 if (!list_empty(&trans->new_bgs)) {
2594                         spin_unlock(&delayed_refs->lock);
2595                         btrfs_create_pending_block_groups(trans, root);
2596                         spin_lock(&delayed_refs->lock);
2597                 }
2598
2599                 node = rb_first(&delayed_refs->root);
2600                 if (!node)
2601                         goto out;
2602                 count = (unsigned long)-1;
2603
2604                 while (node) {
2605                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2606                                        rb_node);
2607                         if (btrfs_delayed_ref_is_head(ref)) {
2608                                 struct btrfs_delayed_ref_head *head;
2609
2610                                 head = btrfs_delayed_node_to_head(ref);
2611                                 atomic_inc(&ref->refs);
2612
2613                                 spin_unlock(&delayed_refs->lock);
2614                                 /*
2615                                  * Mutex was contended, block until it's
2616                                  * released and try again
2617                                  */
2618                                 mutex_lock(&head->mutex);
2619                                 mutex_unlock(&head->mutex);
2620
2621                                 btrfs_put_delayed_ref(ref);
2622                                 cond_resched();
2623                                 goto again;
2624                         }
2625                         node = rb_next(node);
2626                 }
2627                 spin_unlock(&delayed_refs->lock);
2628                 schedule_timeout(1);
2629                 goto again;
2630         }
2631 out:
2632         atomic_dec(&delayed_refs->procs_running_refs);
2633         smp_mb();
2634         if (waitqueue_active(&delayed_refs->wait))
2635                 wake_up(&delayed_refs->wait);
2636
2637         spin_unlock(&delayed_refs->lock);
2638         assert_qgroups_uptodate(trans);
2639         return 0;
2640 }
2641
2642 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2643                                 struct btrfs_root *root,
2644                                 u64 bytenr, u64 num_bytes, u64 flags,
2645                                 int is_data)
2646 {
2647         struct btrfs_delayed_extent_op *extent_op;
2648         int ret;
2649
2650         extent_op = btrfs_alloc_delayed_extent_op();
2651         if (!extent_op)
2652                 return -ENOMEM;
2653
2654         extent_op->flags_to_set = flags;
2655         extent_op->update_flags = 1;
2656         extent_op->update_key = 0;
2657         extent_op->is_data = is_data ? 1 : 0;
2658
2659         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2660                                           num_bytes, extent_op);
2661         if (ret)
2662                 btrfs_free_delayed_extent_op(extent_op);
2663         return ret;
2664 }
2665
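/*
 * check the delayed refs for a data extent: returns 0 if the only
 * pending ref belongs to (root, objectid, offset), 1 if other delayed
 * refs exist, -ENOENT if there is no delayed head for this bytenr, and
 * -EAGAIN if the head mutex was contended.
 */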
2666 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2667                                       struct btrfs_root *root,
2668                                       struct btrfs_path *path,
2669                                       u64 objectid, u64 offset, u64 bytenr)
2670 {
2671         struct btrfs_delayed_ref_head *head;
2672         struct btrfs_delayed_ref_node *ref;
2673         struct btrfs_delayed_data_ref *data_ref;
2674         struct btrfs_delayed_ref_root *delayed_refs;
2675         struct rb_node *node;
2676         int ret = 0;
2677
2678         ret = -ENOENT;
2679         delayed_refs = &trans->transaction->delayed_refs;
2680         spin_lock(&delayed_refs->lock);
2681         head = btrfs_find_delayed_ref_head(trans, bytenr);
2682         if (!head)
2683                 goto out;
2684
2685         if (!mutex_trylock(&head->mutex)) {
2686                 atomic_inc(&head->node.refs);
2687                 spin_unlock(&delayed_refs->lock);
2688
2689                 btrfs_release_path(path);
2690
2691                 /*
2692                  * Mutex was contended, block until it's released and let
2693                  * caller try again
2694                  */
2695                 mutex_lock(&head->mutex);
2696                 mutex_unlock(&head->mutex);
2697                 btrfs_put_delayed_ref(&head->node);
2698                 return -EAGAIN;
2699         }
2700
2701         node = rb_prev(&head->node.rb_node);
2702         if (!node)
2703                 goto out_unlock;
2704
2705         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2706
2707         if (ref->bytenr != bytenr)
2708                 goto out_unlock;
2709
2710         ret = 1;
2711         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2712                 goto out_unlock;
2713
2714         data_ref = btrfs_delayed_node_to_data_ref(ref);
2715
2716         node = rb_prev(node);
2717         if (node) {
2718                 int seq = ref->seq;
2719
2720                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2721                 if (ref->bytenr == bytenr && ref->seq == seq)
2722                         goto out_unlock;
2723         }
2724
2725         if (data_ref->root != root->root_key.objectid ||
2726             data_ref->objectid != objectid || data_ref->offset != offset)
2727                 goto out_unlock;
2728
2729         ret = 0;
2730 out_unlock:
2731         mutex_unlock(&head->mutex);
2732 out:
2733         spin_unlock(&delayed_refs->lock);
2734         return ret;
2735 }
2736
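/*
 * Check the committed extent tree for other references to the extent at
 * @bytenr.  Returns 0 only when the extent item carries a single inline
 * data ref owned by (@root, @objectid, @offset) and the extent is newer
 * than the root's last snapshot; returns 1 when the extent may be shared
 * and -ENOENT when no matching extent item exists.
 */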
2737 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2738                                         struct btrfs_root *root,
2739                                         struct btrfs_path *path,
2740                                         u64 objectid, u64 offset, u64 bytenr)
2741 {
2742         struct btrfs_root *extent_root = root->fs_info->extent_root;
2743         struct extent_buffer *leaf;
2744         struct btrfs_extent_data_ref *ref;
2745         struct btrfs_extent_inline_ref *iref;
2746         struct btrfs_extent_item *ei;
2747         struct btrfs_key key;
2748         u32 item_size;
2749         int ret;
2750
2751         key.objectid = bytenr;
2752         key.offset = (u64)-1;
2753         key.type = BTRFS_EXTENT_ITEM_KEY;
2754
2755         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2756         if (ret < 0)
2757                 goto out;
2758         BUG_ON(ret == 0); /* Corruption */
2759
2760         ret = -ENOENT;
2761         if (path->slots[0] == 0)
2762                 goto out;
2763
2764         path->slots[0]--;
2765         leaf = path->nodes[0];
2766         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2767
2768         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2769                 goto out;
2770
2771         ret = 1;
2772         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2773 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2774         if (item_size < sizeof(*ei)) {
2775                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2776                 goto out;
2777         }
2778 #endif
2779         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2780
2781         if (item_size != sizeof(*ei) +
2782             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2783                 goto out;
2784
2785         if (btrfs_extent_generation(leaf, ei) <=
2786             btrfs_root_last_snapshot(&root->root_item))
2787                 goto out;
2788
2789         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2790         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2791             BTRFS_EXTENT_DATA_REF_KEY)
2792                 goto out;
2793
2794         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2795         if (btrfs_extent_refs(leaf, ei) !=
2796             btrfs_extent_data_ref_count(leaf, ref) ||
2797             btrfs_extent_data_ref_root(leaf, ref) !=
2798             root->root_key.objectid ||
2799             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2800             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2801                 goto out;
2802
2803         ret = 0;
2804 out:
2805         return ret;
2806 }
2807
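/*
 * Test whether anyone other than (@root, @objectid, @offset) references
 * the extent at @bytenr, consulting both the committed extent tree and
 * the pending delayed refs.  Returns 0 if the extent is exclusively
 * owned, a positive value if a cross reference may exist, and a negative
 * errno on failure.
 */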
2808 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2809                           struct btrfs_root *root,
2810                           u64 objectid, u64 offset, u64 bytenr)
2811 {
2812         struct btrfs_path *path;
2813         int ret;
2814         int ret2;
2815
2816         path = btrfs_alloc_path();
2817         if (!path)
2818                 return -ENOMEM;
2819
2820         do {
2821                 ret = check_committed_ref(trans, root, path, objectid,
2822                                           offset, bytenr);
2823                 if (ret && ret != -ENOENT)
2824                         goto out;
2825
2826                 ret2 = check_delayed_ref(trans, root, path, objectid,
2827                                          offset, bytenr);
2828         } while (ret2 == -EAGAIN);
2829
2830         if (ret2 && ret2 != -ENOENT) {
2831                 ret = ret2;
2832                 goto out;
2833         }
2834
2835         if (ret != -ENOENT || ret2 != -ENOENT)
2836                 ret = 0;
2837 out:
2838         btrfs_free_path(path);
2839         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2840                 WARN_ON(ret > 0);
2841         return ret;
2842 }
2843
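/*
 * Walk every item in @buf and run @process_func (btrfs_inc_extent_ref or
 * btrfs_free_extent, depending on @inc) against each data extent a leaf
 * points to and each child block a node points to.
 */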
2844 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2845                            struct btrfs_root *root,
2846                            struct extent_buffer *buf,
2847                            int full_backref, int inc, int for_cow)
2848 {
2849         u64 bytenr;
2850         u64 num_bytes;
2851         u64 parent;
2852         u64 ref_root;
2853         u32 nritems;
2854         struct btrfs_key key;
2855         struct btrfs_file_extent_item *fi;
2856         int i;
2857         int level;
2858         int ret = 0;
2859         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2860                             u64, u64, u64, u64, u64, u64, int);
2861
2862         ref_root = btrfs_header_owner(buf);
2863         nritems = btrfs_header_nritems(buf);
2864         level = btrfs_header_level(buf);
2865
2866         if (!root->ref_cows && level == 0)
2867                 return 0;
2868
2869         if (inc)
2870                 process_func = btrfs_inc_extent_ref;
2871         else
2872                 process_func = btrfs_free_extent;
2873
2874         if (full_backref)
2875                 parent = buf->start;
2876         else
2877                 parent = 0;
2878
2879         for (i = 0; i < nritems; i++) {
2880                 if (level == 0) {
2881                         btrfs_item_key_to_cpu(buf, &key, i);
2882                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2883                                 continue;
2884                         fi = btrfs_item_ptr(buf, i,
2885                                             struct btrfs_file_extent_item);
2886                         if (btrfs_file_extent_type(buf, fi) ==
2887                             BTRFS_FILE_EXTENT_INLINE)
2888                                 continue;
2889                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2890                         if (bytenr == 0)
2891                                 continue;
2892
2893                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2894                         key.offset -= btrfs_file_extent_offset(buf, fi);
2895                         ret = process_func(trans, root, bytenr, num_bytes,
2896                                            parent, ref_root, key.objectid,
2897                                            key.offset, for_cow);
2898                         if (ret)
2899                                 goto fail;
2900                 } else {
2901                         bytenr = btrfs_node_blockptr(buf, i);
2902                         num_bytes = btrfs_level_size(root, level - 1);
2903                         ret = process_func(trans, root, bytenr, num_bytes,
2904                                            parent, ref_root, level - 1, 0,
2905                                            for_cow);
2906                         if (ret)
2907                                 goto fail;
2908                 }
2909         }
2910         return 0;
2911 fail:
2912         return ret;
2913 }
2914
2915 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2916                   struct extent_buffer *buf, int full_backref, int for_cow)
2917 {
2918         return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
2919 }
2920
2921 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2922                   struct extent_buffer *buf, int full_backref, int for_cow)
2923 {
2924         return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
2925 }
2926
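/*
 * Copy @cache->item back into its slot in the extent tree.  Any lookup
 * or update failure here aborts the transaction.
 */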
2927 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2928                                  struct btrfs_root *root,
2929                                  struct btrfs_path *path,
2930                                  struct btrfs_block_group_cache *cache)
2931 {
2932         int ret;
2933         struct btrfs_root *extent_root = root->fs_info->extent_root;
2934         unsigned long bi;
2935         struct extent_buffer *leaf;
2936
2937         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2938         if (ret < 0)
2939                 goto fail;
2940         BUG_ON(ret); /* Corruption */
2941
2942         leaf = path->nodes[0];
2943         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2944         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2945         btrfs_mark_buffer_dirty(leaf);
2946         btrfs_release_path(path);
2947 fail:
2948         if (ret) {
2949                 btrfs_abort_transaction(trans, root, ret);
2950                 return ret;
2951         }
2952         return 0;
2953
2954 }
2955
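/*
 * Return the block group that follows @cache in the rbtree, taking a
 * reference on it and dropping our reference on @cache.  Returns NULL
 * when @cache is the last block group.
 */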
2956 static struct btrfs_block_group_cache *
2957 next_block_group(struct btrfs_root *root,
2958                  struct btrfs_block_group_cache *cache)
2959 {
2960         struct rb_node *node;
2961         spin_lock(&root->fs_info->block_group_cache_lock);
2962         node = rb_next(&cache->cache_node);
2963         btrfs_put_block_group(cache);
2964         if (node) {
2965                 cache = rb_entry(node, struct btrfs_block_group_cache,
2966                                  cache_node);
2967                 btrfs_get_block_group(cache);
2968         } else
2969                 cache = NULL;
2970         spin_unlock(&root->fs_info->block_group_cache_lock);
2971         return cache;
2972 }
2973
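/*
 * Set up the free space cache inode for @block_group so the cache can be
 * written out at commit time: look up or create the inode, truncate any
 * stale contents, and preallocate room sized to the block group (16
 * pages per 256MB).  On success disk_cache_state becomes BTRFS_DC_SETUP.
 */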
2974 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2975                             struct btrfs_trans_handle *trans,
2976                             struct btrfs_path *path)
2977 {
2978         struct btrfs_root *root = block_group->fs_info->tree_root;
2979         struct inode *inode = NULL;
2980         u64 alloc_hint = 0;
2981         int dcs = BTRFS_DC_ERROR;
2982         int num_pages = 0;
2983         int retries = 0;
2984         int ret = 0;
2985
2986         /*
2987          * If this block group is smaller than 100 megs don't bother caching the
2988          * block group.
2989          */
2990         if (block_group->key.offset < (100 * 1024 * 1024)) {
2991                 spin_lock(&block_group->lock);
2992                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2993                 spin_unlock(&block_group->lock);
2994                 return 0;
2995         }
2996
2997 again:
2998         inode = lookup_free_space_inode(root, block_group, path);
2999         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3000                 ret = PTR_ERR(inode);
3001                 btrfs_release_path(path);
3002                 goto out;
3003         }
3004
3005         if (IS_ERR(inode)) {
3006                 BUG_ON(retries);
3007                 retries++;
3008
3009                 if (block_group->ro)
3010                         goto out_free;
3011
3012                 ret = create_free_space_inode(root, trans, block_group, path);
3013                 if (ret)
3014                         goto out_free;
3015                 goto again;
3016         }
3017
3018         /* We've already set up this transaction, go ahead and exit */
3019         if (block_group->cache_generation == trans->transid &&
3020             i_size_read(inode)) {
3021                 dcs = BTRFS_DC_SETUP;
3022                 goto out_put;
3023         }
3024
3025         /*
3026          * We want to set the generation to 0, that way if anything goes wrong
3027          * from here on out we know not to trust this cache when we load up next
3028          * time.
3029          */
3030         BTRFS_I(inode)->generation = 0;
3031         ret = btrfs_update_inode(trans, root, inode);
3032         WARN_ON(ret);
3033
3034         if (i_size_read(inode) > 0) {
3035                 ret = btrfs_truncate_free_space_cache(root, trans, path,
3036                                                       inode);
3037                 if (ret)
3038                         goto out_put;
3039         }
3040
3041         spin_lock(&block_group->lock);
3042         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3043             !btrfs_test_opt(root, SPACE_CACHE)) {
3044                 /*
3045                  * don't bother trying to write stuff out _if_
3046                  * a) we're not cached,
3047                  * b) we're mounted with the nospace_cache option.
3048                  */
3049                 dcs = BTRFS_DC_WRITTEN;
3050                 spin_unlock(&block_group->lock);
3051                 goto out_put;
3052         }
3053         spin_unlock(&block_group->lock);
3054
3055         /*
3056          * Try to preallocate enough space based on how big the block group is.
3057          * Keep in mind this has to include any pinned space which could end up
3058          * taking up quite a bit since it's not folded into the other space
3059          * cache.
3060          */
3061         num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3062         if (!num_pages)
3063                 num_pages = 1;
3064
3065         num_pages *= 16;
3066         num_pages *= PAGE_CACHE_SIZE;
3067
3068         ret = btrfs_check_data_free_space(inode, num_pages);
3069         if (ret)
3070                 goto out_put;
3071
3072         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3073                                               num_pages, num_pages,
3074                                               &alloc_hint);
3075         if (!ret)
3076                 dcs = BTRFS_DC_SETUP;
3077         btrfs_free_reserved_data_space(inode, num_pages);
3078
3079 out_put:
3080         iput(inode);
3081 out_free:
3082         btrfs_release_path(path);
3083 out:
3084         spin_lock(&block_group->lock);
3085         if (!ret && dcs == BTRFS_DC_SETUP)
3086                 block_group->cache_generation = trans->transid;
3087         block_group->disk_cache_state = dcs;
3088         spin_unlock(&block_group->lock);
3089
3090         return ret;
3091 }
3092
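/*
 * Write out all dirty block group items and their free space caches in
 * three passes: set up cache inodes for groups still in BTRFS_DC_CLEAR,
 * write the dirty block group items (moving DC_SETUP groups to
 * DC_NEED_WRITE), then write out the caches marked DC_NEED_WRITE.
 */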
3093 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3094                                    struct btrfs_root *root)
3095 {
3096         struct btrfs_block_group_cache *cache;
3097         int err = 0;
3098         struct btrfs_path *path;
3099         u64 last = 0;
3100
3101         path = btrfs_alloc_path();
3102         if (!path)
3103                 return -ENOMEM;
3104
3105 again:
3106         while (1) {
3107                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3108                 while (cache) {
3109                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3110                                 break;
3111                         cache = next_block_group(root, cache);
3112                 }
3113                 if (!cache) {
3114                         if (last == 0)
3115                                 break;
3116                         last = 0;
3117                         continue;
3118                 }
3119                 err = cache_save_setup(cache, trans, path);
3120                 last = cache->key.objectid + cache->key.offset;
3121                 btrfs_put_block_group(cache);
3122         }
3123
3124         while (1) {
3125                 if (last == 0) {
3126                         err = btrfs_run_delayed_refs(trans, root,
3127                                                      (unsigned long)-1);
3128                         if (err) /* File system offline */
3129                                 goto out;
3130                 }
3131
3132                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3133                 while (cache) {
3134                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3135                                 btrfs_put_block_group(cache);
3136                                 goto again;
3137                         }
3138
3139                         if (cache->dirty)
3140                                 break;
3141                         cache = next_block_group(root, cache);
3142                 }
3143                 if (!cache) {
3144                         if (last == 0)
3145                                 break;
3146                         last = 0;
3147                         continue;
3148                 }
3149
3150                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
3151                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3152                 cache->dirty = 0;
3153                 last = cache->key.objectid + cache->key.offset;
3154
3155                 err = write_one_cache_group(trans, root, path, cache);
3156                 if (err) /* File system offline */
3157                         goto out;
3158
3159                 btrfs_put_block_group(cache);
3160         }
3161
3162         while (1) {
3163                 /*
3164                  * I don't think this is needed since we're just marking our
3165                  * preallocated extent as written, but it can't hurt to run
3166                  * the delayed refs again just in case.
3167                  */
3168                 if (last == 0) {
3169                         err = btrfs_run_delayed_refs(trans, root,
3170                                                      (unsigned long)-1);
3171                         if (err) /* File system offline */
3172                                 goto out;
3173                 }
3174
3175                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3176                 while (cache) {
3177                         /*
3178                          * Really this shouldn't happen, but it could if we
3179                          * couldn't write the entire preallocated extent and
3180                          * splitting the extent resulted in a new block.
3181                          */
3182                         if (cache->dirty) {
3183                                 btrfs_put_block_group(cache);
3184                                 goto again;
3185                         }
3186                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3187                                 break;
3188                         cache = next_block_group(root, cache);
3189                 }
3190                 if (!cache) {
3191                         if (last == 0)
3192                                 break;
3193                         last = 0;
3194                         continue;
3195                 }
3196
3197                 err = btrfs_write_out_cache(root, trans, cache, path);
3198
3199                 /*
3200                  * If we didn't have an error then the cache state is still
3201                  * NEED_WRITE, so we can set it to WRITTEN.
3202                  */
3203                 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3204                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
3205                 last = cache->key.objectid + cache->key.offset;
3206                 btrfs_put_block_group(cache);
3207         }
3208 out:
3209
3210         btrfs_free_path(path);
3211         return err;
3212 }
3213
3214 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3215 {
3216         struct btrfs_block_group_cache *block_group;
3217         int readonly = 0;
3218
3219         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3220         if (!block_group || block_group->ro)
3221                 readonly = 1;
3222         if (block_group)
3223                 btrfs_put_block_group(block_group);
3224         return readonly;
3225 }
3226
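/*
 * Account @total_bytes/@bytes_used to the space_info matching @flags,
 * allocating and registering a new space_info when this is the first
 * block group of that type.  DUP/RAID1/RAID10 double the on-disk factor.
 */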
3227 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3228                              u64 total_bytes, u64 bytes_used,
3229                              struct btrfs_space_info **space_info)
3230 {
3231         struct btrfs_space_info *found;
3232         int i;
3233         int factor;
3234
3235         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3236                      BTRFS_BLOCK_GROUP_RAID10))
3237                 factor = 2;
3238         else
3239                 factor = 1;
3240
3241         found = __find_space_info(info, flags);
3242         if (found) {
3243                 spin_lock(&found->lock);
3244                 found->total_bytes += total_bytes;
3245                 found->disk_total += total_bytes * factor;
3246                 found->bytes_used += bytes_used;
3247                 found->disk_used += bytes_used * factor;
3248                 found->full = 0;
3249                 spin_unlock(&found->lock);
3250                 *space_info = found;
3251                 return 0;
3252         }
3253         found = kzalloc(sizeof(*found), GFP_NOFS);
3254         if (!found)
3255                 return -ENOMEM;
3256
3257         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3258                 INIT_LIST_HEAD(&found->block_groups[i]);
3259         init_rwsem(&found->groups_sem);
3260         spin_lock_init(&found->lock);
3261         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3262         found->total_bytes = total_bytes;
3263         found->disk_total = total_bytes * factor;
3264         found->bytes_used = bytes_used;
3265         found->disk_used = bytes_used * factor;
3266         found->bytes_pinned = 0;
3267         found->bytes_reserved = 0;
3268         found->bytes_readonly = 0;
3269         found->bytes_may_use = 0;
3270         found->full = 0;
3271         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3272         found->chunk_alloc = 0;
3273         found->flush = 0;
3274         init_waitqueue_head(&found->wait);
3275         *space_info = found;
3276         list_add_rcu(&found->list, &info->space_info);
3277         if (flags & BTRFS_BLOCK_GROUP_DATA)
3278                 info->data_sinfo = found;
3279         return 0;
3280 }
3281
3282 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3283 {
3284         u64 extra_flags = chunk_to_extended(flags) &
3285                                 BTRFS_EXTENDED_PROFILE_MASK;
3286
3287         write_seqlock(&fs_info->profiles_lock);
3288         if (flags & BTRFS_BLOCK_GROUP_DATA)
3289                 fs_info->avail_data_alloc_bits |= extra_flags;
3290         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3291                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3292         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3293                 fs_info->avail_system_alloc_bits |= extra_flags;
3294         write_sequnlock(&fs_info->profiles_lock);
3295 }
3296
3297 /*
3298  * returns target flags in extended format or 0 if restripe for this
3299  * chunk_type is not in progress
3300  *
3301  * should be called with either volume_mutex or balance_lock held
3302  */
3303 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3304 {
3305         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3306         u64 target = 0;
3307
3308         if (!bctl)
3309                 return 0;
3310
3311         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3312             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3313                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3314         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3315                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3316                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3317         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3318                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3319                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3320         }
3321
3322         return target;
3323 }
3324
3325 /*
3326  * @flags: available profiles in extended format (see ctree.h)
3327  *
3328  * Returns reduced profile in chunk format.  If profile changing is in
3329  * progress (either running or paused) picks the target profile (if it's
3330  * already available), otherwise falls back to plain reducing.
3331  */
3332 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3333 {
3334         /*
3335          * we add in the count of missing devices because we want
3336          * to make sure that any RAID levels on a degraded FS
3337          * continue to be honored.
3338          */
3339         u64 num_devices = root->fs_info->fs_devices->rw_devices +
3340                 root->fs_info->fs_devices->missing_devices;
3341         u64 target;
3342         u64 tmp;
3343
3344         /*
3345          * see if restripe for this chunk_type is in progress, if so
3346          * try to reduce to the target profile
3347          */
3348         spin_lock(&root->fs_info->balance_lock);
3349         target = get_restripe_target(root->fs_info, flags);
3350         if (target) {
3351                 /* pick target profile only if it's already available */
3352                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3353                         spin_unlock(&root->fs_info->balance_lock);
3354                         return extended_to_chunk(target);
3355                 }
3356         }
3357         spin_unlock(&root->fs_info->balance_lock);
3358
3359         /* First, mask out the RAID levels which aren't possible */
3360         if (num_devices == 1)
3361                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3362                            BTRFS_BLOCK_GROUP_RAID5);
3363         if (num_devices < 3)
3364                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3365         if (num_devices < 4)
3366                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3367
3368         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3369                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3370                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3371         flags &= ~tmp;
3372
3373         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3374                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3375         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3376                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3377         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3378                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3379         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3380                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3381         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3382                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3383
3384         return extended_to_chunk(flags | tmp);
3385 }
3386
3387 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3388 {
3389         unsigned seq;
3390
3391         do {
3392                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3393
3394                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3395                         flags |= root->fs_info->avail_data_alloc_bits;
3396                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3397                         flags |= root->fs_info->avail_system_alloc_bits;
3398                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3399                         flags |= root->fs_info->avail_metadata_alloc_bits;
3400         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3401
3402         return btrfs_reduce_alloc_profile(root, flags);
3403 }
3404
3405 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3406 {
3407         u64 flags;
3408         u64 ret;
3409
3410         if (data)
3411                 flags = BTRFS_BLOCK_GROUP_DATA;
3412         else if (root == root->fs_info->chunk_root)
3413                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3414         else
3415                 flags = BTRFS_BLOCK_GROUP_METADATA;
3416
3417         ret = get_alloc_profile(root, flags);
3418         return ret;
3419 }
3420
3421 /*
3422  * This will check the space that the inode allocates from to make sure we have
3423  * enough space for @bytes, and reserves that amount in bytes_may_use on success.
3424  */
3425 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3426 {
3427         struct btrfs_space_info *data_sinfo;
3428         struct btrfs_root *root = BTRFS_I(inode)->root;
3429         struct btrfs_fs_info *fs_info = root->fs_info;
3430         u64 used;
3431         int ret = 0, committed = 0, alloc_chunk = 1;
3432
3433         /* make sure bytes are sectorsize aligned */
3434         bytes = ALIGN(bytes, root->sectorsize);
3435
3436         if (root == root->fs_info->tree_root ||
3437             BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3438                 alloc_chunk = 0;
3439                 committed = 1;
3440         }
3441
3442         data_sinfo = fs_info->data_sinfo;
3443         if (!data_sinfo)
3444                 goto alloc;
3445
3446 again:
3447         /* make sure we have enough space to handle the data first */
3448         spin_lock(&data_sinfo->lock);
3449         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3450                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3451                 data_sinfo->bytes_may_use;
3452
3453         if (used + bytes > data_sinfo->total_bytes) {
3454                 struct btrfs_trans_handle *trans;
3455
3456                 /*
3457                  * if we don't have enough free bytes in this space then we need
3458                  * to alloc a new chunk.
3459                  */
3460                 if (!data_sinfo->full && alloc_chunk) {
3461                         u64 alloc_target;
3462
3463                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3464                         spin_unlock(&data_sinfo->lock);
3465 alloc:
3466                         alloc_target = btrfs_get_alloc_profile(root, 1);
3467                         trans = btrfs_join_transaction(root);
3468                         if (IS_ERR(trans))
3469                                 return PTR_ERR(trans);
3470
3471                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3472                                              alloc_target,
3473                                              CHUNK_ALLOC_NO_FORCE);
3474                         btrfs_end_transaction(trans, root);
3475                         if (ret < 0) {
3476                                 if (ret != -ENOSPC)
3477                                         return ret;
3478                                 else
3479                                         goto commit_trans;
3480                         }
3481
3482                         if (!data_sinfo)
3483                                 data_sinfo = fs_info->data_sinfo;
3484
3485                         goto again;
3486                 }
3487
3488                 /*
3489                  * If we have less pinned bytes than we want to allocate then
3490                  * don't bother committing the transaction, it won't help us.
3491                  */
3492                 if (data_sinfo->bytes_pinned < bytes)
3493                         committed = 1;
3494                 spin_unlock(&data_sinfo->lock);
3495
3496                 /* commit the current transaction and try again */
3497 commit_trans:
3498                 if (!committed &&
3499                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3500                         committed = 1;
3501                         trans = btrfs_join_transaction(root);
3502                         if (IS_ERR(trans))
3503                                 return PTR_ERR(trans);
3504                         ret = btrfs_commit_transaction(trans, root);
3505                         if (ret)
3506                                 return ret;
3507                         goto again;
3508                 }
3509
3510                 return -ENOSPC;
3511         }
3512         data_sinfo->bytes_may_use += bytes;
3513         trace_btrfs_space_reservation(root->fs_info, "space_info",
3514                                       data_sinfo->flags, bytes, 1);
3515         spin_unlock(&data_sinfo->lock);
3516
3517         return 0;
3518 }
3519
3520 /*
3521  * Called if we need to clear a data reservation for this inode.
3522  */
3523 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3524 {
3525         struct btrfs_root *root = BTRFS_I(inode)->root;
3526         struct btrfs_space_info *data_sinfo;
3527
3528         /* make sure bytes are sectorsize aligned */
3529         bytes = ALIGN(bytes, root->sectorsize);
3530
3531         data_sinfo = root->fs_info->data_sinfo;
3532         spin_lock(&data_sinfo->lock);
3533         data_sinfo->bytes_may_use -= bytes;
3534         trace_btrfs_space_reservation(root->fs_info, "space_info",
3535                                       data_sinfo->flags, bytes, 0);
3536         spin_unlock(&data_sinfo->lock);
3537 }
3538
3539 static void force_metadata_allocation(struct btrfs_fs_info *info)
3540 {
3541         struct list_head *head = &info->space_info;
3542         struct btrfs_space_info *found;
3543
3544         rcu_read_lock();
3545         list_for_each_entry_rcu(found, head, list) {
3546                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3547                         found->force_alloc = CHUNK_ALLOC_FORCE;
3548         }
3549         rcu_read_unlock();
3550 }
3551
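/*
 * Decide whether a new chunk should be allocated for @sinfo: FORCE
 * always allocates, LIMITED allocates while free space is under ~1% of
 * the filesystem size (at least 64MB), and NO_FORCE only allocates once
 * the existing space is roughly 80% used (counting the global reserve
 * as used for metadata).
 */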
3552 static int should_alloc_chunk(struct btrfs_root *root,
3553                               struct btrfs_space_info *sinfo, int force)
3554 {
3555         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3556         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3557         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3558         u64 thresh;
3559
3560         if (force == CHUNK_ALLOC_FORCE)
3561                 return 1;
3562
3563         /*
3564          * We need to take into account the global rsv because for all intents
3565          * and purposes it's used space.  Don't worry about locking the
3566          * global_rsv, it doesn't change except when the transaction commits.
3567          */
3568         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3569                 num_allocated += global_rsv->size;
3570
3571         /*
3572          * in limited mode, we want to have some free space up to
3573          * about 1% of the FS size.
3574          */
3575         if (force == CHUNK_ALLOC_LIMITED) {
3576                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3577                 thresh = max_t(u64, 64 * 1024 * 1024,
3578                                div_factor_fine(thresh, 1));
3579
3580                 if (num_bytes - num_allocated < thresh)
3581                         return 1;
3582         }
3583
3584         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3585                 return 0;
3586         return 1;
3587 }
3588
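/*
 * Estimate the metadata needed to add a chunk of @type: one device item
 * update per device the profile stripes across, plus the chunk tree
 * insertion itself.
 */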
3589 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3590 {
3591         u64 num_dev;
3592
3593         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3594                     BTRFS_BLOCK_GROUP_RAID0 |
3595                     BTRFS_BLOCK_GROUP_RAID5 |
3596                     BTRFS_BLOCK_GROUP_RAID6))
3597                 num_dev = root->fs_info->fs_devices->rw_devices;
3598         else if (type & BTRFS_BLOCK_GROUP_RAID1)
3599                 num_dev = 2;
3600         else
3601                 num_dev = 1;    /* DUP or single */
3602
3603         /* metadata for updating devices and chunk tree */
3604         return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3605 }
3606
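/*
 * Make sure the SYSTEM space_info has enough room left for the device
 * and chunk tree updates an allocation of @type will require; if not,
 * allocate a new SYSTEM chunk first.
 */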
3607 static void check_system_chunk(struct btrfs_trans_handle *trans,
3608                                struct btrfs_root *root, u64 type)
3609 {
3610         struct btrfs_space_info *info;
3611         u64 left;
3612         u64 thresh;
3613
3614         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3615         spin_lock(&info->lock);
3616         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3617                 info->bytes_reserved - info->bytes_readonly;
3618         spin_unlock(&info->lock);
3619
3620         thresh = get_system_chunk_thresh(root, type);
3621         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3622                 printk(KERN_INFO "left=%llu, need=%llu, flags=%llu\n",
3623                        left, thresh, type);
3624                 dump_space_info(info, 0, 0);
3625         }
3626
3627         if (left < thresh) {
3628                 u64 flags;
3629
3630                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3631                 btrfs_alloc_chunk(trans, root, flags);
3632         }
3633 }
3634
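/*
 * Allocate a new chunk for @flags if needed.  Returns 1 when a chunk was
 * allocated, 0 when no allocation was necessary, and a negative errno
 * (including -ENOSPC once the space is full) on failure.  Only one
 * allocation per space_info runs at a time; late arrivals wait on
 * chunk_mutex and then recheck.
 */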
3635 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3636                           struct btrfs_root *extent_root, u64 flags, int force)
3637 {
3638         struct btrfs_space_info *space_info;
3639         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3640         int wait_for_alloc = 0;
3641         int ret = 0;
3642
3643         /* Don't re-enter if we're already allocating a chunk */
3644         if (trans->allocating_chunk)
3645                 return -ENOSPC;
3646
3647         space_info = __find_space_info(extent_root->fs_info, flags);
3648         if (!space_info) {
3649                 ret = update_space_info(extent_root->fs_info, flags,
3650                                         0, 0, &space_info);
3651                 BUG_ON(ret); /* -ENOMEM */
3652         }
3653         BUG_ON(!space_info); /* Logic error */
3654
3655 again:
3656         spin_lock(&space_info->lock);
3657         if (force < space_info->force_alloc)
3658                 force = space_info->force_alloc;
3659         if (space_info->full) {
3660                 spin_unlock(&space_info->lock);
3661                 return 0;
3662         }
3663
3664         if (!should_alloc_chunk(extent_root, space_info, force)) {
3665                 spin_unlock(&space_info->lock);
3666                 return 0;
3667         } else if (space_info->chunk_alloc) {
3668                 wait_for_alloc = 1;
3669         } else {
3670                 space_info->chunk_alloc = 1;
3671         }
3672
3673         spin_unlock(&space_info->lock);
3674
3675         mutex_lock(&fs_info->chunk_mutex);
3676
3677         /*
3678          * The chunk_mutex is held throughout the entirety of a chunk
3679          * allocation, so once we've acquired the chunk_mutex we know that the
3680          * other guy is done and we need to recheck and see if we should
3681          * allocate.
3682          */
3683         if (wait_for_alloc) {
3684                 mutex_unlock(&fs_info->chunk_mutex);
3685                 wait_for_alloc = 0;
3686                 goto again;
3687         }
3688
3689         trans->allocating_chunk = true;
3690
3691         /*
3692          * If we have mixed data/metadata chunks we want to make sure we keep
3693          * allocating mixed chunks instead of individual chunks.
3694          */
3695         if (btrfs_mixed_space_info(space_info))
3696                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3697
3698         /*
3699          * if we're doing a data chunk, go ahead and make sure that
3700          * we keep a reasonable number of metadata chunks allocated in the
3701          * FS as well.
3702          */
3703         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3704                 fs_info->data_chunk_allocations++;
3705                 if (!(fs_info->data_chunk_allocations %
3706                       fs_info->metadata_ratio))
3707                         force_metadata_allocation(fs_info);
3708         }
3709
3710         /*
3711          * Check if we have enough space in SYSTEM chunk because we may need
3712          * to update devices.
3713          */
3714         check_system_chunk(trans, extent_root, flags);
3715
3716         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3717         trans->allocating_chunk = false;
3718
3719         spin_lock(&space_info->lock);
3720         if (ret < 0 && ret != -ENOSPC)
3721                 goto out;
3722         if (ret)
3723                 space_info->full = 1;
3724         else
3725                 ret = 1;
3726
3727         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3728 out:
3729         space_info->chunk_alloc = 0;
3730         spin_unlock(&space_info->lock);
3731         mutex_unlock(&fs_info->chunk_mutex);
3732         return ret;
3733 }
3734
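/*
 * Decide whether a reservation of @bytes may exceed the space already
 * allocated to @space_info.  Overcommit is allowed up to half of
 * total_bytes (1/8 when a full flush is possible), capped by the free
 * chunk space, and never once used space plus twice the global reserve
 * reaches total_bytes.  E.g. with 10GB of total_bytes and FLUSH_ALL,
 * at most 1.25GB of overcommit is allowed.
 */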
3735 static int can_overcommit(struct btrfs_root *root,
3736                           struct btrfs_space_info *space_info, u64 bytes,
3737                           enum btrfs_reserve_flush_enum flush)
3738 {
3739         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3740         u64 profile = btrfs_get_alloc_profile(root, 0);
3741         u64 rsv_size = 0;
3742         u64 avail;
3743         u64 used;
3744         u64 to_add;
3745
3746         used = space_info->bytes_used + space_info->bytes_reserved +
3747                 space_info->bytes_pinned + space_info->bytes_readonly;
3748
3749         spin_lock(&global_rsv->lock);
3750         rsv_size = global_rsv->size;
3751         spin_unlock(&global_rsv->lock);
3752
3753         /*
3754          * We only want to allow over committing if we have lots of actual space
3755          * free, but if we don't have enough space to handle the global reserve
3756          * space then we could end up having a real enospc problem when trying
3757          * to allocate a chunk or some other such important allocation.
3758          */
3759         rsv_size <<= 1;
3760         if (used + rsv_size >= space_info->total_bytes)
3761                 return 0;
3762
3763         used += space_info->bytes_may_use;
3764
3765         spin_lock(&root->fs_info->free_chunk_lock);
3766         avail = root->fs_info->free_chunk_space;
3767         spin_unlock(&root->fs_info->free_chunk_lock);
3768
3769         /*
3770          * If we have dup, raid1 or raid10 then only half of the free
3771          * space is actually usable.  For raid56, the space info used
3772          * doesn't include the parity drive, so we don't have to
3773          * change the math
3774          */
3775         if (profile & (BTRFS_BLOCK_GROUP_DUP |
3776                        BTRFS_BLOCK_GROUP_RAID1 |
3777                        BTRFS_BLOCK_GROUP_RAID10))
3778                 avail >>= 1;
3779
3780         to_add = space_info->total_bytes;
3781
3782         /*
3783          * If we aren't flushing all things, let us overcommit up to
3784          * half of the space.  If we can flush, don't let us overcommit
3785          * too much; limit it to 1/8 of the space.
3786          */
3787         if (flush == BTRFS_RESERVE_FLUSH_ALL)
3788                 to_add >>= 3;
3789         else
3790                 to_add >>= 1;
3791
3792         /*
3793          * Limit the overcommit to the amount of free space we could possibly
3794          * allocate for chunks.
3795          */
3796         to_add = min(avail, to_add);
3797
3798         if (used + bytes < space_info->total_bytes + to_add)
3799                 return 1;
3800         return 0;
3801 }
3802
3803 void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
3804                                   unsigned long nr_pages)
3805 {
3806         struct super_block *sb = root->fs_info->sb;
3807         int started;
3808
3809         /* If we cannot start writeback, just sync all the delalloc files. */
3810         started = try_to_writeback_inodes_sb_nr(sb, nr_pages,
3811                                                       WB_REASON_FS_FREE_SPACE);
3812         if (!started) {
3813                 /*
3814                  * We needn't worry about the filesystem going from r/w to
3815                  * r/o even though we don't acquire the ->s_umount mutex,
3816                  * because the filesystem should guarantee that the delalloc
3817                  * inode list is empty once it is read-only (all dirty pages
3818                  * are written to the disk).
3819                  */
3820                 btrfs_start_delalloc_inodes(root, 0);
3821                 btrfs_wait_ordered_extents(root, 0);
3822         }
3823 }
3824
3825 /*
3826  * shrink metadata reservation for delalloc
3827  */
3828 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
3829                             bool wait_ordered)
3830 {
3831         struct btrfs_block_rsv *block_rsv;
3832         struct btrfs_space_info *space_info;
3833         struct btrfs_trans_handle *trans;
3834         u64 delalloc_bytes;
3835         u64 max_reclaim;
3836         long time_left;
3837         unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3838         int loops = 0;
3839         enum btrfs_reserve_flush_enum flush;
3840
3841         trans = (struct btrfs_trans_handle *)current->journal_info;
3842         block_rsv = &root->fs_info->delalloc_block_rsv;
3843         space_info = block_rsv->space_info;
3844
3845         smp_mb();
3846         delalloc_bytes = percpu_counter_sum_positive(
3847                                                 &root->fs_info->delalloc_bytes);
3848         if (delalloc_bytes == 0) {
3849                 if (trans)
3850                         return;
3851                 btrfs_wait_ordered_extents(root, 0);
3852                 return;
3853         }
3854
3855         while (delalloc_bytes && loops < 3) {
3856                 max_reclaim = min(delalloc_bytes, to_reclaim);
3857                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
3858                 btrfs_writeback_inodes_sb_nr(root, nr_pages);
3859                 /*
3860                  * We need to wait for the async pages to actually start before
3861                  * we do anything.
3862                  */
3863                 wait_event(root->fs_info->async_submit_wait,
3864                            !atomic_read(&root->fs_info->async_delalloc_pages));
3865
3866                 if (!trans)
3867                         flush = BTRFS_RESERVE_FLUSH_ALL;
3868                 else
3869                         flush = BTRFS_RESERVE_NO_FLUSH;
3870                 spin_lock(&space_info->lock);
3871                 if (can_overcommit(root, space_info, orig, flush)) {
3872                         spin_unlock(&space_info->lock);
3873                         break;
3874                 }
3875                 spin_unlock(&space_info->lock);
3876
3877                 loops++;
3878                 if (wait_ordered && !trans) {
3879                         btrfs_wait_ordered_extents(root, 0);
3880                 } else {
3881                         time_left = schedule_timeout_killable(1);
3882                         if (time_left)
3883                                 break;
3884                 }
3885                 smp_mb();
3886                 delalloc_bytes = percpu_counter_sum_positive(
3887                                                 &root->fs_info->delalloc_bytes);
3888         }
3889 }
3890
3891 /**
3892  * may_commit_transaction - possibly commit the transaction if it's OK to
3893  * @root - the root we're allocating for
 * @space_info - the space_info we're trying to reserve from
3894  * @bytes - the number of bytes we want to reserve
3895  * @force - force the commit
3896  *
3897  * This will check to make sure that committing the transaction will actually
3898  * get us somewhere and then commit the transaction if it does.  Otherwise it
3899  * will return -ENOSPC.
3900  */
3901 static int may_commit_transaction(struct btrfs_root *root,
3902                                   struct btrfs_space_info *space_info,
3903                                   u64 bytes, int force)
3904 {
3905         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
3906         struct btrfs_trans_handle *trans;
3907
3908         trans = (struct btrfs_trans_handle *)current->journal_info;
3909         if (trans)
3910                 return -EAGAIN;
3911
3912         if (force)
3913                 goto commit;
3914
3915         /* See if there is enough pinned space to make this reservation */
3916         spin_lock(&space_info->lock);
3917         if (space_info->bytes_pinned >= bytes) {
3918                 spin_unlock(&space_info->lock);
3919                 goto commit;
3920         }
3921         spin_unlock(&space_info->lock);
3922
3923         /*
3924          * See if there is some space in the delayed insertion reservation for
3925          * this reservation.
3926          */
3927         if (space_info != delayed_rsv->space_info)
3928                 return -ENOSPC;
3929
3930         spin_lock(&space_info->lock);
3931         spin_lock(&delayed_rsv->lock);
3932         if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
3933                 spin_unlock(&delayed_rsv->lock);
3934                 spin_unlock(&space_info->lock);
3935                 return -ENOSPC;
3936         }
3937         spin_unlock(&delayed_rsv->lock);
3938         spin_unlock(&space_info->lock);
3939
3940 commit:
3941         trans = btrfs_join_transaction(root);
3942         if (IS_ERR(trans))
3943                 return -ENOSPC;
3944
3945         return btrfs_commit_transaction(trans, root);
3946 }
3947
3948 enum flush_state {
3949         FLUSH_DELAYED_ITEMS_NR  =       1,
3950         FLUSH_DELAYED_ITEMS     =       2,
3951         FLUSH_DELALLOC          =       3,
3952         FLUSH_DELALLOC_WAIT     =       4,
3953         ALLOC_CHUNK             =       5,
3954         COMMIT_TRANS            =       6,
3955 };
3956
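/*
 * Run one state of the flushing state machine to reclaim @num_bytes of
 * metadata space: flush delayed items, write out (and optionally wait
 * on) delalloc, allocate a new chunk, or commit the transaction as the
 * last resort.
 */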
3957 static int flush_space(struct btrfs_root *root,
3958                        struct btrfs_space_info *space_info, u64 num_bytes,
3959                        u64 orig_bytes, int state)
3960 {
3961         struct btrfs_trans_handle *trans;
3962         int nr;
3963         int ret = 0;
3964
3965         switch (state) {
3966         case FLUSH_DELAYED_ITEMS_NR:
3967         case FLUSH_DELAYED_ITEMS:
3968                 if (state == FLUSH_DELAYED_ITEMS_NR) {
3969                         u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
3970
3971                         nr = (int)div64_u64(num_bytes, bytes);
3972                         if (!nr)
3973                                 nr = 1;
3974                         nr *= 2;
3975                 } else {
3976                         nr = -1;
3977                 }
3978                 trans = btrfs_join_transaction(root);
3979                 if (IS_ERR(trans)) {
3980                         ret = PTR_ERR(trans);
3981                         break;
3982                 }
3983                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
3984                 btrfs_end_transaction(trans, root);
3985                 break;
3986         case FLUSH_DELALLOC:
3987         case FLUSH_DELALLOC_WAIT:
3988                 shrink_delalloc(root, num_bytes, orig_bytes,
3989                                 state == FLUSH_DELALLOC_WAIT);
3990                 break;
3991         case ALLOC_CHUNK:
3992                 trans = btrfs_join_transaction(root);
3993                 if (IS_ERR(trans)) {
3994                         ret = PTR_ERR(trans);
3995                         break;
3996                 }
3997                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3998                                      btrfs_get_alloc_profile(root, 0),
3999                                      CHUNK_ALLOC_NO_FORCE);
4000                 btrfs_end_transaction(trans, root);
4001                 if (ret == -ENOSPC)
4002                         ret = 0;
4003                 break;
4004         case COMMIT_TRANS:
4005                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4006                 break;
4007         default:
4008                 ret = -ENOSPC;
4009                 break;
4010         }
4011
4012         return ret;
4013 }
4014 /**
4015  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4016  * @root - the root we're allocating for
4017  * @block_rsv - the block_rsv we're allocating for
4018  * @orig_bytes - the number of bytes we want
4019  * @flush - whether or not we can flush to make our reservation
4020  *
4021  * This will reserve orig_bytes number of bytes from the space info associated
4022  * with the block_rsv.  If there is not enough space it will make an attempt to
4023  * flush out space to make room.  It will do this by flushing delalloc if
4024  * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
4025  * then no attempts to regain reservations will be made and this will fail if
4026  * there is not enough space already.
4027  */
4028 static int reserve_metadata_bytes(struct btrfs_root *root,
4029                                   struct btrfs_block_rsv *block_rsv,
4030                                   u64 orig_bytes,
4031                                   enum btrfs_reserve_flush_enum flush)
4032 {
4033         struct btrfs_space_info *space_info = block_rsv->space_info;
4034         u64 used;
4035         u64 num_bytes = orig_bytes;
4036         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4037         int ret = 0;
4038         bool flushing = false;
4039
4040 again:
4041         ret = 0;
4042         spin_lock(&space_info->lock);
4043         /*
4044          * We only want to wait if somebody other than us is flushing and we
4045          * are actually allowed to flush all things.
4046          */
4047         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4048                space_info->flush) {
4049                 spin_unlock(&space_info->lock);
4050                 /*
4051                  * If we have a trans handle we can't wait because the flusher
4052                  * may have to commit the transaction, which would mean we would
4053                  * deadlock since we are waiting for the flusher to finish, but
4054                  * hold the current transaction open.
4055                  */
4056                 if (current->journal_info)
4057                         return -EAGAIN;
4058                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4059                 /* Must have been killed, return */
4060                 if (ret)
4061                         return -EINTR;
4062
4063                 spin_lock(&space_info->lock);
4064         }
4065
4066         ret = -ENOSPC;
4067         used = space_info->bytes_used + space_info->bytes_reserved +
4068                 space_info->bytes_pinned + space_info->bytes_readonly +
4069                 space_info->bytes_may_use;
4070
4071         /*
4072          * The idea here is that if we've not already over-reserved the space
4073          * then we can go ahead and save our reservation first and only start
4074          * flushing if we need to.  Otherwise, if we've already overcommitted,
4075          * let's start flushing stuff first and then come back and try to make
4076          * our reservation.
4077          */
4078         if (used <= space_info->total_bytes) {
4079                 if (used + orig_bytes <= space_info->total_bytes) {
4080                         space_info->bytes_may_use += orig_bytes;
4081                         trace_btrfs_space_reservation(root->fs_info,
4082                                 "space_info", space_info->flags, orig_bytes, 1);
4083                         ret = 0;
4084                 } else {
4085                         /*
4086                          * Ok, set num_bytes to orig_bytes since we aren't
4087                          * overcommitted; this way we only try to reclaim what
4088                          * we need.
4089                          */
4090                         num_bytes = orig_bytes;
4091                 }
4092         } else {
4093                 /*
4094                  * Ok, we're overcommitted: set num_bytes to the overcommitted
4095                  * amount plus the number of bytes that we need for this
4096                  * reservation.
4097                  */
4098                 num_bytes = used - space_info->total_bytes +
4099                         (orig_bytes * 2);
4100         }
4101
4102         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4103                 space_info->bytes_may_use += orig_bytes;
4104                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4105                                               space_info->flags, orig_bytes,
4106                                               1);
4107                 ret = 0;
4108         }
4109
4110         /*
4111          * Couldn't make our reservation; save our place so that while we're
4112          * trying to reclaim space we can actually use it instead of somebody
4113          * else stealing it from us.
4114          *
4115          * We make the other tasks wait for the flush only when we can flush
4116          * all things.
4117          */
4118         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4119                 flushing = true;
4120                 space_info->flush = 1;
4121         }
4122
4123         spin_unlock(&space_info->lock);
4124
4125         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4126                 goto out;
4127
4128         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4129                           flush_state);
4130         flush_state++;
4131
4132         /*
4133          * If we are FLUSH_LIMIT, we can not flush delalloc, or a deadlock
4134          * would happen. So skip the delalloc flush states.
4135          */
4136         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4137             (flush_state == FLUSH_DELALLOC ||
4138              flush_state == FLUSH_DELALLOC_WAIT))
4139                 flush_state = ALLOC_CHUNK;
4140
4141         if (!ret)
4142                 goto again;
4143         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4144                  flush_state < COMMIT_TRANS)
4145                 goto again;
4146         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4147                  flush_state <= COMMIT_TRANS)
4148                 goto again;
4149
4150 out:
4151         if (ret == -ENOSPC &&
4152             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4153                 struct btrfs_block_rsv *global_rsv =
4154                         &root->fs_info->global_block_rsv;
4155
4156                 if (block_rsv != global_rsv &&
4157                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4158                         ret = 0;
4159         }
4160         if (flushing) {
4161                 spin_lock(&space_info->lock);
4162                 space_info->flush = 0;
4163                 wake_up_all(&space_info->wait);
4164                 spin_unlock(&space_info->lock);
4165         }
4166         return ret;
4167 }
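
/*
 * Usage sketch (hypothetical caller; 'rsv' is assumed to be set up by
 * the caller, as the real callers such as btrfs_block_rsv_add() below
 * do):
 *
 *	u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
 *	int err = reserve_metadata_bytes(root, rsv, bytes,
 *					 BTRFS_RESERVE_FLUSH_ALL);
 *	if (!err)
 *		block_rsv_add_bytes(rsv, bytes, 1);
 *
 * On -ENOSPC every applicable flush state was tried; on -EAGAIN we
 * bailed early because waiting on another flusher while holding a
 * transaction open could deadlock.
 */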
4168
4169 static struct btrfs_block_rsv *get_block_rsv(
4170                                         const struct btrfs_trans_handle *trans,
4171                                         const struct btrfs_root *root)
4172 {
4173         struct btrfs_block_rsv *block_rsv = NULL;
4174
4175         if (root->ref_cows)
4176                 block_rsv = trans->block_rsv;
4177
4178         if (root == root->fs_info->csum_root && trans->adding_csums)
4179                 block_rsv = trans->block_rsv;
4180
4181         if (!block_rsv)
4182                 block_rsv = root->block_rsv;
4183
4184         if (!block_rsv)
4185                 block_rsv = &root->fs_info->empty_block_rsv;
4186
4187         return block_rsv;
4188 }
4189
4190 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4191                                u64 num_bytes)
4192 {
4193         int ret = -ENOSPC;
4194         spin_lock(&block_rsv->lock);
4195         if (block_rsv->reserved >= num_bytes) {
4196                 block_rsv->reserved -= num_bytes;
4197                 if (block_rsv->reserved < block_rsv->size)
4198                         block_rsv->full = 0;
4199                 ret = 0;
4200         }
4201         spin_unlock(&block_rsv->lock);
4202         return ret;
4203 }
4204
4205 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4206                                 u64 num_bytes, int update_size)
4207 {
4208         spin_lock(&block_rsv->lock);
4209         block_rsv->reserved += num_bytes;
4210         if (update_size)
4211                 block_rsv->size += num_bytes;
4212         else if (block_rsv->reserved >= block_rsv->size)
4213                 block_rsv->full = 1;
4214         spin_unlock(&block_rsv->lock);
4215 }
4216
4217 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4218                                     struct btrfs_block_rsv *block_rsv,
4219                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4220 {
4221         struct btrfs_space_info *space_info = block_rsv->space_info;
4222
4223         spin_lock(&block_rsv->lock);
4224         if (num_bytes == (u64)-1)
4225                 num_bytes = block_rsv->size;
4226         block_rsv->size -= num_bytes;
4227         if (block_rsv->reserved >= block_rsv->size) {
4228                 num_bytes = block_rsv->reserved - block_rsv->size;
4229                 block_rsv->reserved = block_rsv->size;
4230                 block_rsv->full = 1;
4231         } else {
4232                 num_bytes = 0;
4233         }
4234         spin_unlock(&block_rsv->lock);
4235
4236         if (num_bytes > 0) {
4237                 if (dest) {
4238                         spin_lock(&dest->lock);
4239                         if (!dest->full) {
4240                                 u64 bytes_to_add;
4241
4242                                 bytes_to_add = dest->size - dest->reserved;
4243                                 bytes_to_add = min(num_bytes, bytes_to_add);
4244                                 dest->reserved += bytes_to_add;
4245                                 if (dest->reserved >= dest->size)
4246                                         dest->full = 1;
4247                                 num_bytes -= bytes_to_add;
4248                         }
4249                         spin_unlock(&dest->lock);
4250                 }
4251                 if (num_bytes) {
4252                         spin_lock(&space_info->lock);
4253                         space_info->bytes_may_use -= num_bytes;
4254                         trace_btrfs_space_reservation(fs_info, "space_info",
4255                                         space_info->flags, num_bytes, 0);
4256                         space_info->reservation_progress++;
4257                         spin_unlock(&space_info->lock);
4258                 }
4259         }
4260 }
4261
4262 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4263                                    struct btrfs_block_rsv *dst, u64 num_bytes)
4264 {
4265         int ret;
4266
4267         ret = block_rsv_use_bytes(src, num_bytes);
4268         if (ret)
4269                 return ret;
4270
4271         block_rsv_add_bytes(dst, num_bytes, 1);
4272         return 0;
4273 }
4274
4275 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4276 {
4277         memset(rsv, 0, sizeof(*rsv));
4278         spin_lock_init(&rsv->lock);
4279         rsv->type = type;
4280 }
4281
4282 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4283                                               unsigned short type)
4284 {
4285         struct btrfs_block_rsv *block_rsv;
4286         struct btrfs_fs_info *fs_info = root->fs_info;
4287
4288         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4289         if (!block_rsv)
4290                 return NULL;
4291
4292         btrfs_init_block_rsv(block_rsv, type);
4293         block_rsv->space_info = __find_space_info(fs_info,
4294                                                   BTRFS_BLOCK_GROUP_METADATA);
4295         return block_rsv;
4296 }
4297
4298 void btrfs_free_block_rsv(struct btrfs_root *root,
4299                           struct btrfs_block_rsv *rsv)
4300 {
4301         if (!rsv)
4302                 return;
4303         btrfs_block_rsv_release(root, rsv, (u64)-1);
4304         kfree(rsv);
4305 }
4306
4307 int btrfs_block_rsv_add(struct btrfs_root *root,
4308                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4309                         enum btrfs_reserve_flush_enum flush)
4310 {
4311         int ret;
4312
4313         if (num_bytes == 0)
4314                 return 0;
4315
4316         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4317         if (!ret) {
4318                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4319                 return 0;
4320         }
4321
4322         return ret;
4323 }
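
/*
 * Pairing sketch (hypothetical caller): a temporary rsv is filled with
 * btrfs_block_rsv_add() and torn down with btrfs_free_block_rsv(),
 * which releases whatever is still reserved:
 *
 *	struct btrfs_block_rsv *rsv;
 *
 *	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
 *	if (!rsv)
 *		return -ENOMEM;
 *	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
 *				  BTRFS_RESERVE_FLUSH_ALL);
 *	...consume the reservation...
 *	btrfs_free_block_rsv(root, rsv);
 */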
4324
4325 int btrfs_block_rsv_check(struct btrfs_root *root,
4326                           struct btrfs_block_rsv *block_rsv, int min_factor)
4327 {
4328         u64 num_bytes = 0;
4329         int ret = -ENOSPC;
4330
4331         if (!block_rsv)
4332                 return 0;
4333
4334         spin_lock(&block_rsv->lock);
4335         num_bytes = div_factor(block_rsv->size, min_factor);
4336         if (block_rsv->reserved >= num_bytes)
4337                 ret = 0;
4338         spin_unlock(&block_rsv->lock);
4339
4340         return ret;
4341 }
4342
4343 int btrfs_block_rsv_refill(struct btrfs_root *root,
4344                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4345                            enum btrfs_reserve_flush_enum flush)
4346 {
4347         u64 num_bytes = 0;
4348         int ret = -ENOSPC;
4349
4350         if (!block_rsv)
4351                 return 0;
4352
4353         spin_lock(&block_rsv->lock);
4354         num_bytes = min_reserved;
4355         if (block_rsv->reserved >= num_bytes)
4356                 ret = 0;
4357         else
4358                 num_bytes -= block_rsv->reserved;
4359         spin_unlock(&block_rsv->lock);
4360
4361         if (!ret)
4362                 return 0;
4363
4364         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4365         if (!ret) {
4366                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4367                 return 0;
4368         }
4369
4370         return ret;
4371 }
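
/*
 * Contrast with btrfs_block_rsv_add() (sketch, assumed starting state:
 * size == 1M, reserved == 256K): refill passes update_size == 0 to
 * block_rsv_add_bytes(), so
 *
 *	btrfs_block_rsv_refill(root, rsv, 1M, flush);
 *
 * reserves only the missing 768K from the space_info, after which
 * reserved == size == 1M, whereas btrfs_block_rsv_add() would have
 * grown ->size by the full amount as well.
 */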
4372
4373 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4374                             struct btrfs_block_rsv *dst_rsv,
4375                             u64 num_bytes)
4376 {
4377         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4378 }
4379
4380 void btrfs_block_rsv_release(struct btrfs_root *root,
4381                              struct btrfs_block_rsv *block_rsv,
4382                              u64 num_bytes)
4383 {
4384         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4385         if (global_rsv->full || global_rsv == block_rsv ||
4386             block_rsv->space_info != global_rsv->space_info)
4387                 global_rsv = NULL;
4388         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4389                                 num_bytes);
4390 }
4391
4392 /*
4393  * Helper to calculate the size of the global block reservation.
4394  * The desired value is the sum of the space used by the extent tree,
4395  * the checksum tree and the root tree.
4396  */
4397 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4398 {
4399         struct btrfs_space_info *sinfo;
4400         u64 num_bytes;
4401         u64 meta_used;
4402         u64 data_used;
4403         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4404
4405         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4406         spin_lock(&sinfo->lock);
4407         data_used = sinfo->bytes_used;
4408         spin_unlock(&sinfo->lock);
4409
4410         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4411         spin_lock(&sinfo->lock);
4412         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4413                 data_used = 0;
4414         meta_used = sinfo->bytes_used;
4415         spin_unlock(&sinfo->lock);
4416
4417         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4418                     csum_size * 2;
4419         num_bytes += div64_u64(data_used + meta_used, 50);
4420
4421         if (num_bytes * 3 > meta_used)
4422                 num_bytes = div64_u64(meta_used, 3);
4423
4424         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4425 }
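
/*
 * Worked example of the sizing above (assumed numbers: 4K blocks,
 * crc32c so csum_size == 4, data_used == 100G, meta_used == 10G):
 *
 *	num_bytes  = (100G >> 12) * 4 * 2	-> 200M for csum items
 *	num_bytes += (100G + 10G) / 50		-> ~2.2G
 *
 * 3 * ~2.4G is still below meta_used, so the clamp to meta_used / 3
 * does not apply, and the result is rounded up to a multiple of
 * leafsize << 10.
 */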
4426
4427 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4428 {
4429         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4430         struct btrfs_space_info *sinfo = block_rsv->space_info;
4431         u64 num_bytes;
4432
4433         num_bytes = calc_global_metadata_size(fs_info);
4434
4435         spin_lock(&sinfo->lock);
4436         spin_lock(&block_rsv->lock);
4437
4438         block_rsv->size = num_bytes;
4439
4440         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4441                     sinfo->bytes_reserved + sinfo->bytes_readonly +
4442                     sinfo->bytes_may_use;
4443
4444         if (sinfo->total_bytes > num_bytes) {
4445                 num_bytes = sinfo->total_bytes - num_bytes;
4446                 block_rsv->reserved += num_bytes;
4447                 sinfo->bytes_may_use += num_bytes;
4448                 trace_btrfs_space_reservation(fs_info, "space_info",
4449                                       sinfo->flags, num_bytes, 1);
4450         }
4451
4452         if (block_rsv->reserved >= block_rsv->size) {
4453                 num_bytes = block_rsv->reserved - block_rsv->size;
4454                 sinfo->bytes_may_use -= num_bytes;
4455                 trace_btrfs_space_reservation(fs_info, "space_info",
4456                                       sinfo->flags, num_bytes, 0);
4457                 sinfo->reservation_progress++;
4458                 block_rsv->reserved = block_rsv->size;
4459                 block_rsv->full = 1;
4460         }
4461
4462         spin_unlock(&block_rsv->lock);
4463         spin_unlock(&sinfo->lock);
4464 }
4465
4466 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4467 {
4468         struct btrfs_space_info *space_info;
4469
4470         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4471         fs_info->chunk_block_rsv.space_info = space_info;
4472
4473         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4474         fs_info->global_block_rsv.space_info = space_info;
4475         fs_info->delalloc_block_rsv.space_info = space_info;
4476         fs_info->trans_block_rsv.space_info = space_info;
4477         fs_info->empty_block_rsv.space_info = space_info;
4478         fs_info->delayed_block_rsv.space_info = space_info;
4479
4480         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4481         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4482         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4483         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4484         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4485
4486         update_global_block_rsv(fs_info);
4487 }
4488
4489 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4490 {
4491         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4492                                 (u64)-1);
4493         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4494         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4495         WARN_ON(fs_info->trans_block_rsv.size > 0);
4496         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4497         WARN_ON(fs_info->chunk_block_rsv.size > 0);
4498         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4499         WARN_ON(fs_info->delayed_block_rsv.size > 0);
4500         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4501 }
4502
4503 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4504                                   struct btrfs_root *root)
4505 {
4506         if (!trans->block_rsv)
4507                 return;
4508
4509         if (!trans->bytes_reserved)
4510                 return;
4511
4512         trace_btrfs_space_reservation(root->fs_info, "transaction",
4513                                       trans->transid, trans->bytes_reserved, 0);
4514         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4515         trans->bytes_reserved = 0;
4516 }
4517
4518 /* Can only return 0 or -ENOSPC */
4519 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4520                                   struct inode *inode)
4521 {
4522         struct btrfs_root *root = BTRFS_I(inode)->root;
4523         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4524         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4525
4526         /*
4527          * We need to hold space in order to delete our orphan item once we've
4528          * added it, so this takes the reservation so that we can release it
4529          * later, when we are truly done with the orphan item.
4530          */
4531         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4532         trace_btrfs_space_reservation(root->fs_info, "orphan",
4533                                       btrfs_ino(inode), num_bytes, 1);
4534         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4535 }
4536
4537 void btrfs_orphan_release_metadata(struct inode *inode)
4538 {
4539         struct btrfs_root *root = BTRFS_I(inode)->root;
4540         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4541         trace_btrfs_space_reservation(root->fs_info, "orphan",
4542                                       btrfs_ino(inode), num_bytes, 0);
4543         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4544 }
4545
4546 /*
4547  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
4548  * @root: the root of the parent directory
4549  * @rsv: block reservation
4550  * @items: the number of items that we need to reserve for
4551  * @qgroup_reserved: used to return the reserved size in qgroup
4552  *
4553  * This function is used to reserve the space for snapshot/subvolume
4554  * creation and deletion. Those operations are different from the
4555  * common file/directory operations: they change two fs/file trees
4556  * and the root tree, and the number of items that the qgroup reserves
4557  * differs from the free space reservation. So we can not use
4558  * the space reservation mechanism in start_transaction().
4559  */
4560 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
4561                                      struct btrfs_block_rsv *rsv,
4562                                      int items,
4563                                      u64 *qgroup_reserved)
4564 {
4565         u64 num_bytes;
4566         int ret;
4567
4568         if (root->fs_info->quota_enabled) {
4569                 /* One for parent inode, two for dir entries */
4570                 num_bytes = 3 * root->leafsize;
4571                 ret = btrfs_qgroup_reserve(root, num_bytes);
4572                 if (ret)
4573                         return ret;
4574         } else {
4575                 num_bytes = 0;
4576         }
4577
4578         *qgroup_reserved = num_bytes;
4579
4580         num_bytes = btrfs_calc_trans_metadata_size(root, items);
4581         rsv->space_info = __find_space_info(root->fs_info,
4582                                             BTRFS_BLOCK_GROUP_METADATA);
4583         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
4584                                   BTRFS_RESERVE_FLUSH_ALL);
4585         if (ret) {
4586                 if (*qgroup_reserved)
4587                         btrfs_qgroup_free(root, *qgroup_reserved);
4588         }
4589
4590         return ret;
4591 }
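
/*
 * Sketch of the qgroup half above (assumed leafsize == 16K): a
 * snapshot touches the parent inode plus two directory entries, so
 * with quotas enabled 3 * 16K == 48K is taken from the qgroup before
 * the regular metadata reservation for 'items' is made; if the latter
 * fails, the 48K is handed back via btrfs_qgroup_free().
 */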
4592
4593 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
4594                                       struct btrfs_block_rsv *rsv,
4595                                       u64 qgroup_reserved)
4596 {
4597         btrfs_block_rsv_release(root, rsv, (u64)-1);
4598         if (qgroup_reserved)
4599                 btrfs_qgroup_free(root, qgroup_reserved);
4600 }
4601
4602 /**
4603  * drop_outstanding_extent - drop an outstanding extent
4604  * @inode: the inode we're dropping the extent for
4605  *
4606  * This is called when we are freeing up an outstanding extent, either
4607  * after an error or after an extent is written.  This will return the number of
4608  * reserved extents that need to be freed.  This must be called with
4609  * BTRFS_I(inode)->lock held.
4610  */
4611 static unsigned drop_outstanding_extent(struct inode *inode)
4612 {
4613         unsigned drop_inode_space = 0;
4614         unsigned dropped_extents = 0;
4615
4616         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4617         BTRFS_I(inode)->outstanding_extents--;
4618
4619         if (BTRFS_I(inode)->outstanding_extents == 0 &&
4620             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4621                                &BTRFS_I(inode)->runtime_flags))
4622                 drop_inode_space = 1;
4623
4624         /*
4625          * If we have at least as many outstanding extents as we have
4626          * reserved then we need to leave the reserved extents count alone.
4627          */
4628         if (BTRFS_I(inode)->outstanding_extents >=
4629             BTRFS_I(inode)->reserved_extents)
4630                 return drop_inode_space;
4631
4632         dropped_extents = BTRFS_I(inode)->reserved_extents -
4633                 BTRFS_I(inode)->outstanding_extents;
4634         BTRFS_I(inode)->reserved_extents -= dropped_extents;
4635         return dropped_extents + drop_inode_space;
4636 }
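
/*
 * Example of the bookkeeping above (assumed counts): with
 * outstanding_extents == 2 and reserved_extents == 5, one call first
 * drops outstanding to 1, then computes dropped_extents == 5 - 1 == 4
 * and pulls reserved_extents down to 1, returning 4 (plus 1 more if
 * the inode-update reservation bit was also cleared).
 */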
4637
4638 /**
4639  * calc_csum_metadata_size - return the amount of metadata space that must be
4640  *      reserved/freed for the given bytes.
4641  * @inode: the inode we're manipulating
4642  * @num_bytes: the number of bytes in question
4643  * @reserve: 1 if we are reserving space, 0 if we are freeing space
4644  *
4645  * This adjusts the number of csum_bytes in the inode and then returns the
4646  * correct amount of metadata that must either be reserved or freed.  We
4647  * calculate how many checksums we can fit into one leaf and then divide the
4648  * number of bytes that will need to be checksummed by this value to figure out
4649  * how many checksums will be required.  If we are adding bytes then the number
4650  * may go up and we will return the number of additional bytes that must be
4651  * reserved.  If it is going down we will return the number of bytes that must
4652  * be freed.
4653  *
4654  * This must be called with BTRFS_I(inode)->lock held.
4655  */
4656 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4657                                    int reserve)
4658 {
4659         struct btrfs_root *root = BTRFS_I(inode)->root;
4660         u64 csum_size;
4661         int num_csums_per_leaf;
4662         int num_csums;
4663         int old_csums;
4664
4665         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4666             BTRFS_I(inode)->csum_bytes == 0)
4667                 return 0;
4668
4669         old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4670         if (reserve)
4671                 BTRFS_I(inode)->csum_bytes += num_bytes;
4672         else
4673                 BTRFS_I(inode)->csum_bytes -= num_bytes;
4674         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4675         num_csums_per_leaf = (int)div64_u64(csum_size,
4676                                             sizeof(struct btrfs_csum_item) +
4677                                             sizeof(struct btrfs_disk_key));
4678         num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4679         num_csums = num_csums + num_csums_per_leaf - 1;
4680         num_csums = num_csums / num_csums_per_leaf;
4681
4682         old_csums = old_csums + num_csums_per_leaf - 1;
4683         old_csums = old_csums / num_csums_per_leaf;
4684
4685         /* No change, no need to reserve more */
4686         if (old_csums == num_csums)
4687                 return 0;
4688
4689         if (reserve)
4690                 return btrfs_calc_trans_metadata_size(root,
4691                                                       num_csums - old_csums);
4692
4693         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4694 }
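
/*
 * Worked example (assumed: 4K sectors and, for round numbers,
 * num_csums_per_leaf == 200): an inode with csum_bytes == 1M covers
 * 256 sectors, needing ceil(256 / 200) == 2 leaves of csum items.
 * Reserving another 1M raises that to ceil(512 / 200) == 3, so this
 * returns btrfs_calc_trans_metadata_size(root, 3 - 2), the cost of
 * one more item.
 */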
4695
4696 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4697 {
4698         struct btrfs_root *root = BTRFS_I(inode)->root;
4699         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4700         u64 to_reserve = 0;
4701         u64 csum_bytes;
4702         unsigned nr_extents = 0;
4703         int extra_reserve = 0;
4704         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
4705         int ret = 0;
4706         bool delalloc_lock = true;
4707         u64 to_free = 0;
4708         unsigned dropped;
4709
4710         /* If we are a free space inode we need to not flush since we will be in
4711          * the middle of a transaction commit.  We also don't need the delalloc
4712          * mutex since we won't race with anybody.  We need this mostly to make
4713          * lockdep shut its filthy mouth.
4714          */
4715         if (btrfs_is_free_space_inode(inode)) {
4716                 flush = BTRFS_RESERVE_NO_FLUSH;
4717                 delalloc_lock = false;
4718         }
4719
4720         if (flush != BTRFS_RESERVE_NO_FLUSH &&
4721             btrfs_transaction_in_commit(root->fs_info))
4722                 schedule_timeout(1);
4723
4724         if (delalloc_lock)
4725                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
4726
4727         num_bytes = ALIGN(num_bytes, root->sectorsize);
4728
4729         spin_lock(&BTRFS_I(inode)->lock);
4730         BTRFS_I(inode)->outstanding_extents++;
4731
4732         if (BTRFS_I(inode)->outstanding_extents >
4733             BTRFS_I(inode)->reserved_extents)
4734                 nr_extents = BTRFS_I(inode)->outstanding_extents -
4735                         BTRFS_I(inode)->reserved_extents;
4736
4737         /*
4738          * Add an item to reserve for updating the inode when we complete the
4739          * delalloc io.
4740          */
4741         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4742                       &BTRFS_I(inode)->runtime_flags)) {
4743                 nr_extents++;
4744                 extra_reserve = 1;
4745         }
4746
4747         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4748         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4749         csum_bytes = BTRFS_I(inode)->csum_bytes;
4750         spin_unlock(&BTRFS_I(inode)->lock);
4751
4752         if (root->fs_info->quota_enabled) {
4753                 ret = btrfs_qgroup_reserve(root, num_bytes +
4754                                            nr_extents * root->leafsize);
4755                 if (ret)
4756                         goto out_fail;
4757         }
4758
4759         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
4760         if (unlikely(ret)) {
4761                 if (root->fs_info->quota_enabled)
4762                         btrfs_qgroup_free(root, num_bytes +
4763                                                 nr_extents * root->leafsize);
4764                 goto out_fail;
4765         }
4766
4767         spin_lock(&BTRFS_I(inode)->lock);
4768         if (extra_reserve) {
4769                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4770                         &BTRFS_I(inode)->runtime_flags);
4771                 nr_extents--;
4772         }
4773         BTRFS_I(inode)->reserved_extents += nr_extents;
4774         spin_unlock(&BTRFS_I(inode)->lock);
4775
4776         if (delalloc_lock)
4777                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4778
4779         if (to_reserve)
4780                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
4781                                               btrfs_ino(inode), to_reserve, 1);
4782         block_rsv_add_bytes(block_rsv, to_reserve, 1);
4783
4784         return 0;
4785
4786 out_fail:
4787         spin_lock(&BTRFS_I(inode)->lock);
4788         dropped = drop_outstanding_extent(inode);
4789         /*
4790          * If the inode's csum_bytes is the same as the original
4791          * csum_bytes then we know we haven't raced with any free()ers,
4792          * so we can just reduce our inode's csum bytes and carry on.
4793          * Otherwise we have to do the normal free thing to account for
4794          * the case that the free side didn't free up its reserve
4795          * because of this outstanding reservation.
4796          */
4797         if (BTRFS_I(inode)->csum_bytes == csum_bytes)
4798                 calc_csum_metadata_size(inode, num_bytes, 0);
4799         else
4800                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4801         spin_unlock(&BTRFS_I(inode)->lock);
4802         if (dropped)
4803                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4804
4805         if (to_free) {
4806                 btrfs_block_rsv_release(root, block_rsv, to_free);
4807                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
4808                                               btrfs_ino(inode), to_free, 0);
4809         }
4810         if (delalloc_lock)
4811                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4812         return ret;
4813 }
4814
4815 /**
4816  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
4817  * @inode: the inode to release the reservation for
4818  * @num_bytes: the number of bytes we're releasing
4819  *
4820  * This will release the metadata reservation for an inode.  This can be called
4821  * once we complete IO for a given set of bytes to release their metadata
4822  * reservations.
4823  */
4824 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4825 {
4826         struct btrfs_root *root = BTRFS_I(inode)->root;
4827         u64 to_free = 0;
4828         unsigned dropped;
4829
4830         num_bytes = ALIGN(num_bytes, root->sectorsize);
4831         spin_lock(&BTRFS_I(inode)->lock);
4832         dropped = drop_outstanding_extent(inode);
4833
4834         if (num_bytes)
4835                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4836         spin_unlock(&BTRFS_I(inode)->lock);
4837         if (dropped > 0)
4838                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4839
4840         trace_btrfs_space_reservation(root->fs_info, "delalloc",
4841                                       btrfs_ino(inode), to_free, 0);
4842         if (root->fs_info->quota_enabled) {
4843                 btrfs_qgroup_free(root, num_bytes +
4844                                         dropped * root->leafsize);
4845         }
4846
4847         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4848                                 to_free);
4849 }
4850
4851 /**
4852  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
4853  * @inode: inode we're writing to
4854  * @num_bytes: the number of bytes we want to allocate
4855  *
4856  * This will do the following things
4857  *
4858  * o reserve space in the data space info for num_bytes
4859  * o reserve space in the metadata space info based on number of outstanding
4860  *   extents and how much csums will be needed
4861  * o add to the inode's ->delalloc_bytes
4862  * o add it to the fs_info's delalloc inodes list.
4863  *
4864  * This will return 0 for success and -ENOSPC if there is no space left.
4865  */
4866 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
4867 {
4868         int ret;
4869
4870         ret = btrfs_check_data_free_space(inode, num_bytes);
4871         if (ret)
4872                 return ret;
4873
4874         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
4875         if (ret) {
4876                 btrfs_free_reserved_data_space(inode, num_bytes);
4877                 return ret;
4878         }
4879
4880         return 0;
4881 }
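
/*
 * Typical write-path pairing (sketch; 'inode', 'num_bytes' and the
 * failure condition come from the hypothetical caller):
 *
 *	ret = btrfs_delalloc_reserve_space(inode, num_bytes);
 *	if (ret)
 *		return ret;
 *	...dirty the pages, set the delalloc bits...
 *	if (failed)
 *		btrfs_delalloc_release_space(inode, num_bytes);
 *
 * On success the reservation is instead consumed when the delalloc
 * io completes.
 */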
4882
4883 /**
4884  * btrfs_delalloc_release_space - release data and metadata space for delalloc
4885  * @inode: inode we're releasing space for
4886  * @num_bytes: the number of bytes we want to free up
4887  *
4888  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
4889  * called in the case that we don't need the metadata AND data reservations
4890  * anymore, e.g. if there is an error or we insert an inline extent.
4891  *
4892  * This function will release the metadata space that was not used and will
4893  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
4894  * list if there are no delalloc bytes left.
4895  */
4896 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
4897 {
4898         btrfs_delalloc_release_metadata(inode, num_bytes);
4899         btrfs_free_reserved_data_space(inode, num_bytes);
4900 }
4901
4902 static int update_block_group(struct btrfs_root *root,
4903                               u64 bytenr, u64 num_bytes, int alloc)
4904 {
4905         struct btrfs_block_group_cache *cache = NULL;
4906         struct btrfs_fs_info *info = root->fs_info;
4907         u64 total = num_bytes;
4908         u64 old_val;
4909         u64 byte_in_group;
4910         int factor;
4911
4912         /* block accounting for super block */
4913         spin_lock(&info->delalloc_lock);
4914         old_val = btrfs_super_bytes_used(info->super_copy);
4915         if (alloc)
4916                 old_val += num_bytes;
4917         else
4918                 old_val -= num_bytes;
4919         btrfs_set_super_bytes_used(info->super_copy, old_val);
4920         spin_unlock(&info->delalloc_lock);
4921
4922         while (total) {
4923                 cache = btrfs_lookup_block_group(info, bytenr);
4924                 if (!cache)
4925                         return -ENOENT;
4926                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4927                                     BTRFS_BLOCK_GROUP_RAID1 |
4928                                     BTRFS_BLOCK_GROUP_RAID10))
4929                         factor = 2;
4930                 else
4931                         factor = 1;
4932                 /*
4933                  * If this block group has free space cache written out, we
4934                  * need to make sure to load it if we are removing space.  This
4935                  * is because we need the unpinning stage to actually add the
4936                  * space back to the block group, otherwise we will leak space.
4937                  */
4938                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
4939                         cache_block_group(cache, 1);
4940
4941                 byte_in_group = bytenr - cache->key.objectid;
4942                 WARN_ON(byte_in_group > cache->key.offset);
4943
4944                 spin_lock(&cache->space_info->lock);
4945                 spin_lock(&cache->lock);
4946
4947                 if (btrfs_test_opt(root, SPACE_CACHE) &&
4948                     cache->disk_cache_state < BTRFS_DC_CLEAR)
4949                         cache->disk_cache_state = BTRFS_DC_CLEAR;
4950
4951                 cache->dirty = 1;
4952                 old_val = btrfs_block_group_used(&cache->item);
4953                 num_bytes = min(total, cache->key.offset - byte_in_group);
4954                 if (alloc) {
4955                         old_val += num_bytes;
4956                         btrfs_set_block_group_used(&cache->item, old_val);
4957                         cache->reserved -= num_bytes;
4958                         cache->space_info->bytes_reserved -= num_bytes;
4959                         cache->space_info->bytes_used += num_bytes;
4960                         cache->space_info->disk_used += num_bytes * factor;
4961                         spin_unlock(&cache->lock);
4962                         spin_unlock(&cache->space_info->lock);
4963                 } else {
4964                         old_val -= num_bytes;
4965                         btrfs_set_block_group_used(&cache->item, old_val);
4966                         cache->pinned += num_bytes;
4967                         cache->space_info->bytes_pinned += num_bytes;
4968                         cache->space_info->bytes_used -= num_bytes;
4969                         cache->space_info->disk_used -= num_bytes * factor;
4970                         spin_unlock(&cache->lock);
4971                         spin_unlock(&cache->space_info->lock);
4972
4973                         set_extent_dirty(info->pinned_extents,
4974                                          bytenr, bytenr + num_bytes - 1,
4975                                          GFP_NOFS | __GFP_NOFAIL);
4976                 }
4977                 btrfs_put_block_group(cache);
4978                 total -= num_bytes;
4979                 bytenr += num_bytes;
4980         }
4981         return 0;
4982 }
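
/*
 * A note on 'factor' above (sketch with assumed numbers): allocating
 * 1M out of a RAID1 block group takes the factor == 2 branch, so
 * bytes_used grows by 1M while disk_used grows by 2M, one copy per
 * mirror.
 */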
4983
4984 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4985 {
4986         struct btrfs_block_group_cache *cache;
4987         u64 bytenr;
4988
4989         spin_lock(&root->fs_info->block_group_cache_lock);
4990         bytenr = root->fs_info->first_logical_byte;
4991         spin_unlock(&root->fs_info->block_group_cache_lock);
4992
4993         if (bytenr < (u64)-1)
4994                 return bytenr;
4995
4996         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4997         if (!cache)
4998                 return 0;
4999
5000         bytenr = cache->key.objectid;
5001         btrfs_put_block_group(cache);
5002
5003         return bytenr;
5004 }
5005
5006 static int pin_down_extent(struct btrfs_root *root,
5007                            struct btrfs_block_group_cache *cache,
5008                            u64 bytenr, u64 num_bytes, int reserved)
5009 {
5010         spin_lock(&cache->space_info->lock);
5011         spin_lock(&cache->lock);
5012         cache->pinned += num_bytes;
5013         cache->space_info->bytes_pinned += num_bytes;
5014         if (reserved) {
5015                 cache->reserved -= num_bytes;
5016                 cache->space_info->bytes_reserved -= num_bytes;
5017         }
5018         spin_unlock(&cache->lock);
5019         spin_unlock(&cache->space_info->lock);
5020
5021         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5022                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5023         return 0;
5024 }
5025
5026 /*
5027  * this function must be called within a transaction
5028  */
5029 int btrfs_pin_extent(struct btrfs_root *root,
5030                      u64 bytenr, u64 num_bytes, int reserved)
5031 {
5032         struct btrfs_block_group_cache *cache;
5033
5034         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5035         BUG_ON(!cache); /* Logic error */
5036
5037         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5038
5039         btrfs_put_block_group(cache);
5040         return 0;
5041 }
5042
5043 /*
5044  * this function must be called within a transaction
5045  */
5046 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5047                                     u64 bytenr, u64 num_bytes)
5048 {
5049         struct btrfs_block_group_cache *cache;
5050
5051         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5052         BUG_ON(!cache); /* Logic error */
5053
5054         /*
5055          * pull in the free space cache (if any) so that our pin
5056          * removes the free space from the cache.  We have load_only set
5057          * to one because the slow code to read in the free extents does check
5058          * the pinned extents.
5059          */
5060         cache_block_group(cache, 1);
5061
5062         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5063
5064         /* remove us from the free space cache (if we're there at all) */
5065         btrfs_remove_free_space(cache, bytenr, num_bytes);
5066         btrfs_put_block_group(cache);
5067         return 0;
5068 }
5069
5070 /**
5071  * btrfs_update_reserved_bytes - update the block_group and space info counters
5072  * @cache:      The cache we are manipulating
5073  * @num_bytes:  The number of bytes in question
5074  * @reserve:    One of the reservation enums
5075  *
5076  * This is called by the allocator when it reserves space, or by somebody who is
5077  * freeing space that was never actually used on disk.  For example if you
5078  * reserve some space for a new leaf in transaction A and before transaction A
5079  * commits you free that leaf, you call this with reserve set to 0 in order to
5080  * clear the reservation.
5081  *
5082  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
5083  * ENOSPC accounting.  For data we handle the reservation through clearing the
5084  * delalloc bits in the io_tree.  We have to do this since we could end up
5085  * allocating less disk space for the amount of data we have reserved in the
5086  * case of compression.
5087  *
5088  * If this is a reservation and the block group has become read only we cannot
5089  * make the reservation and return -EAGAIN, otherwise this function always
5090  * succeeds.
5091  */
5092 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5093                                        u64 num_bytes, int reserve)
5094 {
5095         struct btrfs_space_info *space_info = cache->space_info;
5096         int ret = 0;
5097
5098         spin_lock(&space_info->lock);
5099         spin_lock(&cache->lock);
5100         if (reserve != RESERVE_FREE) {
5101                 if (cache->ro) {
5102                         ret = -EAGAIN;
5103                 } else {
5104                         cache->reserved += num_bytes;
5105                         space_info->bytes_reserved += num_bytes;
5106                         if (reserve == RESERVE_ALLOC) {
5107                                 trace_btrfs_space_reservation(cache->fs_info,
5108                                                 "space_info", space_info->flags,
5109                                                 num_bytes, 0);
5110                                 space_info->bytes_may_use -= num_bytes;
5111                         }
5112                 }
5113         } else {
5114                 if (cache->ro)
5115                         space_info->bytes_readonly += num_bytes;
5116                 cache->reserved -= num_bytes;
5117                 space_info->bytes_reserved -= num_bytes;
5118                 space_info->reservation_progress++;
5119         }
5120         spin_unlock(&cache->lock);
5121         spin_unlock(&space_info->lock);
5122         return ret;
5123 }
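
/*
 * Lifecycle sketch (hypothetical caller; 'cache' and 'num_bytes' are
 * assumed): a leaf reserved during a transaction and then freed before
 * it ever hits disk goes through both halves of the enum:
 *
 *	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC);
 *	...the leaf is freed again before being written...
 *	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE);
 *
 * RESERVE_ALLOC also subtracts num_bytes from bytes_may_use, paying
 * back the bytes_may_use bump taken earlier when the space was
 * reserved (e.g. by reserve_metadata_bytes()).
 */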
5124
5125 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5126                                 struct btrfs_root *root)
5127 {
5128         struct btrfs_fs_info *fs_info = root->fs_info;
5129         struct btrfs_caching_control *next;
5130         struct btrfs_caching_control *caching_ctl;
5131         struct btrfs_block_group_cache *cache;
5132
5133         down_write(&fs_info->extent_commit_sem);
5134
5135         list_for_each_entry_safe(caching_ctl, next,
5136                                  &fs_info->caching_block_groups, list) {
5137                 cache = caching_ctl->block_group;
5138                 if (block_group_cache_done(cache)) {
5139                         cache->last_byte_to_unpin = (u64)-1;
5140                         list_del_init(&caching_ctl->list);
5141                         put_caching_control(caching_ctl);
5142                 } else {
5143                         cache->last_byte_to_unpin = caching_ctl->progress;
5144                 }
5145         }
5146
5147         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5148                 fs_info->pinned_extents = &fs_info->freed_extents[1];
5149         else
5150                 fs_info->pinned_extents = &fs_info->freed_extents[0];
5151
5152         up_write(&fs_info->extent_commit_sem);
5153
5154         update_global_block_rsv(fs_info);
5155 }
5156
5157 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5158 {
5159         struct btrfs_fs_info *fs_info = root->fs_info;
5160         struct btrfs_block_group_cache *cache = NULL;
5161         struct btrfs_space_info *space_info;
5162         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5163         u64 len;
5164         bool readonly;
5165
5166         while (start <= end) {
5167                 readonly = false;
5168                 if (!cache ||
5169                     start >= cache->key.objectid + cache->key.offset) {
5170                         if (cache)
5171                                 btrfs_put_block_group(cache);
5172                         cache = btrfs_lookup_block_group(fs_info, start);
5173                         BUG_ON(!cache); /* Logic error */
5174                 }
5175
5176                 len = cache->key.objectid + cache->key.offset - start;
5177                 len = min(len, end + 1 - start);
5178
5179                 if (start < cache->last_byte_to_unpin) {
5180                         len = min(len, cache->last_byte_to_unpin - start);
5181                         btrfs_add_free_space(cache, start, len);
5182                 }
5183
5184                 start += len;
5185                 space_info = cache->space_info;
5186
5187                 spin_lock(&space_info->lock);
5188                 spin_lock(&cache->lock);
5189                 cache->pinned -= len;
5190                 space_info->bytes_pinned -= len;
5191                 if (cache->ro) {
5192                         space_info->bytes_readonly += len;
5193                         readonly = true;
5194                 }
5195                 spin_unlock(&cache->lock);
5196                 if (!readonly && global_rsv->space_info == space_info) {
5197                         spin_lock(&global_rsv->lock);
5198                         if (!global_rsv->full) {
5199                                 len = min(len, global_rsv->size -
5200                                           global_rsv->reserved);
5201                                 global_rsv->reserved += len;
5202                                 space_info->bytes_may_use += len;
5203                                 if (global_rsv->reserved >= global_rsv->size)
5204                                         global_rsv->full = 1;
5205                         }
5206                         spin_unlock(&global_rsv->lock);
5207                 }
5208                 spin_unlock(&space_info->lock);
5209         }
5210
5211         if (cache)
5212                 btrfs_put_block_group(cache);
5213         return 0;
5214 }
5215
5216 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5217                                struct btrfs_root *root)
5218 {
5219         struct btrfs_fs_info *fs_info = root->fs_info;
5220         struct extent_io_tree *unpin;
5221         u64 start;
5222         u64 end;
5223         int ret;
5224
5225         if (trans->aborted)
5226                 return 0;
5227
5228         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5229                 unpin = &fs_info->freed_extents[1];
5230         else
5231                 unpin = &fs_info->freed_extents[0];
5232
5233         while (1) {
5234                 ret = find_first_extent_bit(unpin, 0, &start, &end,
5235                                             EXTENT_DIRTY, NULL);
5236                 if (ret)
5237                         break;
5238
5239                 if (btrfs_test_opt(root, DISCARD))
5240                         ret = btrfs_discard_extent(root, start,
5241                                                    end + 1 - start, NULL);
5242
5243                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5244                 unpin_extent_range(root, start, end);
5245                 cond_resched();
5246         }
5247
5248         return 0;
5249 }
5250
5251 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5252                                 struct btrfs_root *root,
5253                                 u64 bytenr, u64 num_bytes, u64 parent,
5254                                 u64 root_objectid, u64 owner_objectid,
5255                                 u64 owner_offset, int refs_to_drop,
5256                                 struct btrfs_delayed_extent_op *extent_op)
5257 {
5258         struct btrfs_key key;
5259         struct btrfs_path *path;
5260         struct btrfs_fs_info *info = root->fs_info;
5261         struct btrfs_root *extent_root = info->extent_root;
5262         struct extent_buffer *leaf;
5263         struct btrfs_extent_item *ei;
5264         struct btrfs_extent_inline_ref *iref;
5265         int ret;
5266         int is_data;
5267         int extent_slot = 0;
5268         int found_extent = 0;
5269         int num_to_del = 1;
5270         u32 item_size;
5271         u64 refs;
5272
5273         path = btrfs_alloc_path();
5274         if (!path)
5275                 return -ENOMEM;
5276
5277         path->reada = 1;
5278         path->leave_spinning = 1;
5279
5280         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5281         BUG_ON(!is_data && refs_to_drop != 1);
5282
5283         ret = lookup_extent_backref(trans, extent_root, path, &iref,
5284                                     bytenr, num_bytes, parent,
5285                                     root_objectid, owner_objectid,
5286                                     owner_offset);
5287         if (ret == 0) {
5288                 extent_slot = path->slots[0];
5289                 while (extent_slot >= 0) {
5290                         btrfs_item_key_to_cpu(path->nodes[0], &key,
5291                                               extent_slot);
5292                         if (key.objectid != bytenr)
5293                                 break;
5294                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5295                             key.offset == num_bytes) {
5296                                 found_extent = 1;
5297                                 break;
5298                         }
5299                         if (path->slots[0] - extent_slot > 5)
5300                                 break;
5301                         extent_slot--;
5302                 }
5303 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5304                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5305                 if (found_extent && item_size < sizeof(*ei))
5306                         found_extent = 0;
5307 #endif
5308                 if (!found_extent) {
5309                         BUG_ON(iref);
5310                         ret = remove_extent_backref(trans, extent_root, path,
5311                                                     NULL, refs_to_drop,
5312                                                     is_data);
5313                         if (ret) {
5314                                 btrfs_abort_transaction(trans, extent_root, ret);
5315                                 goto out;
5316                         }
5317                         btrfs_release_path(path);
5318                         path->leave_spinning = 1;
5319
5320                         key.objectid = bytenr;
5321                         key.type = BTRFS_EXTENT_ITEM_KEY;
5322                         key.offset = num_bytes;
5323
5324                         ret = btrfs_search_slot(trans, extent_root,
5325                                                 &key, path, -1, 1);
5326                         if (ret) {
5327                                 printk(KERN_ERR "umm, got %d back from search"
5328                                        ", was looking for %llu\n", ret,
5329                                        (unsigned long long)bytenr);
5330                                 if (ret > 0)
5331                                         btrfs_print_leaf(extent_root,
5332                                                          path->nodes[0]);
5333                         }
5334                         if (ret < 0) {
5335                                 btrfs_abort_transaction(trans, extent_root, ret);
5336                                 goto out;
5337                         }
5338                         extent_slot = path->slots[0];
5339                 }
5340         } else if (ret == -ENOENT) {
5341                 btrfs_print_leaf(extent_root, path->nodes[0]);
5342                 WARN_ON(1);
5343                 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
5344                        "parent %llu root %llu  owner %llu offset %llu\n",
5345                        (unsigned long long)bytenr,
5346                        (unsigned long long)parent,
5347                        (unsigned long long)root_objectid,
5348                        (unsigned long long)owner_objectid,
5349                        (unsigned long long)owner_offset);
5350         } else {
5351                 btrfs_abort_transaction(trans, extent_root, ret);
5352                 goto out;
5353         }
5354
5355         leaf = path->nodes[0];
5356         item_size = btrfs_item_size_nr(leaf, extent_slot);
5357 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5358         if (item_size < sizeof(*ei)) {
5359                 BUG_ON(found_extent || extent_slot != path->slots[0]);
5360                 ret = convert_extent_item_v0(trans, extent_root, path,
5361                                              owner_objectid, 0);
5362                 if (ret < 0) {
5363                         btrfs_abort_transaction(trans, extent_root, ret);
5364                         goto out;
5365                 }
5366
5367                 btrfs_release_path(path);
5368                 path->leave_spinning = 1;
5369
5370                 key.objectid = bytenr;
5371                 key.type = BTRFS_EXTENT_ITEM_KEY;
5372                 key.offset = num_bytes;
5373
5374                 ret = btrfs_search_slot(trans, extent_root, &key, path,
5375                                         -1, 1);
5376                 if (ret) {
5377                         printk(KERN_ERR "umm, got %d back from search"
5378                                ", was looking for %llu\n", ret,
5379                                (unsigned long long)bytenr);
5380                         btrfs_print_leaf(extent_root, path->nodes[0]);
5381                 }
5382                 if (ret < 0) {
5383                         btrfs_abort_transaction(trans, extent_root, ret);
5384                         goto out;
5385                 }
5386
5387                 extent_slot = path->slots[0];
5388                 leaf = path->nodes[0];
5389                 item_size = btrfs_item_size_nr(leaf, extent_slot);
5390         }
5391 #endif
5392         BUG_ON(item_size < sizeof(*ei));
5393         ei = btrfs_item_ptr(leaf, extent_slot,
5394                             struct btrfs_extent_item);
5395         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
5396                 struct btrfs_tree_block_info *bi;
5397                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5398                 bi = (struct btrfs_tree_block_info *)(ei + 1);
5399                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5400         }
5401
5402         refs = btrfs_extent_refs(leaf, ei);
5403         BUG_ON(refs < refs_to_drop);
5404         refs -= refs_to_drop;
5405
5406         if (refs > 0) {
5407                 if (extent_op)
5408                         __run_delayed_extent_op(extent_op, leaf, ei);
5409                 /*
5410                  * In the case of an inline back ref, the reference count
5411                  * will be updated by remove_extent_backref
5412                  */
5413                 if (iref) {
5414                         BUG_ON(!found_extent);
5415                 } else {
5416                         btrfs_set_extent_refs(leaf, ei, refs);
5417                         btrfs_mark_buffer_dirty(leaf);
5418                 }
5419                 if (found_extent) {
5420                         ret = remove_extent_backref(trans, extent_root, path,
5421                                                     iref, refs_to_drop,
5422                                                     is_data);
5423                         if (ret) {
5424                                 btrfs_abort_transaction(trans, extent_root, ret);
5425                                 goto out;
5426                         }
5427                 }
5428         } else {
5429                 if (found_extent) {
5430                         BUG_ON(is_data && refs_to_drop !=
5431                                extent_data_ref_count(root, path, iref));
5432                         if (iref) {
5433                                 BUG_ON(path->slots[0] != extent_slot);
5434                         } else {
5435                                 BUG_ON(path->slots[0] != extent_slot + 1);
5436                                 path->slots[0] = extent_slot;
5437                                 num_to_del = 2;
5438                         }
5439                 }
5440
5441                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5442                                       num_to_del);
5443                 if (ret) {
5444                         btrfs_abort_transaction(trans, extent_root, ret);
5445                         goto out;
5446                 }
5447                 btrfs_release_path(path);
5448
5449                 if (is_data) {
5450                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5451                         if (ret) {
5452                                 btrfs_abort_transaction(trans, extent_root, ret);
5453                                 goto out;
5454                         }
5455                 }
5456
5457                 ret = update_block_group(root, bytenr, num_bytes, 0);
5458                 if (ret) {
5459                         btrfs_abort_transaction(trans, extent_root, ret);
5460                         goto out;
5461                 }
5462         }
5463 out:
5464         btrfs_free_path(path);
5465         return ret;
5466 }
5467
5468 /*
5469  * when we free a block, it is possible (and likely) that we free the last
5470  * delayed ref for that extent as well.  This searches the delayed ref tree for
5471  * a given extent, and if there are no other delayed refs to be processed, it
5472  * removes it from the tree.
5473  */
5474 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5475                                       struct btrfs_root *root, u64 bytenr)
5476 {
5477         struct btrfs_delayed_ref_head *head;
5478         struct btrfs_delayed_ref_root *delayed_refs;
5479         struct btrfs_delayed_ref_node *ref;
5480         struct rb_node *node;
5481         int ret = 0;
5482
5483         delayed_refs = &trans->transaction->delayed_refs;
5484         spin_lock(&delayed_refs->lock);
5485         head = btrfs_find_delayed_ref_head(trans, bytenr);
5486         if (!head)
5487                 goto out;
5488
5489         node = rb_prev(&head->node.rb_node);
5490         if (!node)
5491                 goto out;
5492
5493         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5494
5495         /* there are still entries for this ref, we can't drop it */
5496         if (ref->bytenr == bytenr)
5497                 goto out;
5498
5499         if (head->extent_op) {
5500                 if (!head->must_insert_reserved)
5501                         goto out;
5502                 btrfs_free_delayed_extent_op(head->extent_op);
5503                 head->extent_op = NULL;
5504         }
5505
5506         /*
5507          * waiting for the lock here would deadlock.  If someone else has it
5508          * waiting for the lock here would deadlock.  If someone else has it
5509          * locked, they are already in the process of dropping it anyway.
5510         if (!mutex_trylock(&head->mutex))
5511                 goto out;
5512
5513         /*
5514          * at this point we have a head with no other entries.  Go
5515          * ahead and process it.
5516          */
5517         head->node.in_tree = 0;
5518         rb_erase(&head->node.rb_node, &delayed_refs->root);
5519
5520         delayed_refs->num_entries--;
5521
5522         /*
5523          * we don't take a ref on the node because we're removing it from the
5524          * tree, so we just steal the ref the tree was holding.
5525          */
5526         delayed_refs->num_heads--;
5527         if (list_empty(&head->cluster))
5528                 delayed_refs->num_heads_ready--;
5529
5530         list_del_init(&head->cluster);
5531         spin_unlock(&delayed_refs->lock);
5532
5533         BUG_ON(head->extent_op);
5534         if (head->must_insert_reserved)
5535                 ret = 1;
5536
5537         mutex_unlock(&head->mutex);
5538         btrfs_put_delayed_ref(&head->node);
5539         return ret;
5540 out:
5541         spin_unlock(&delayed_refs->lock);
5542         return 0;
5543 }
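
/*
 * Editor's illustration (not part of the original source): delayed refs
 * for one extent sort immediately in front of that extent's head node in
 * the rbtree, which is what the rb_prev() check above relies on:
 *
 *	... [ref, bytenr X] [ref, bytenr X] [head, bytenr X] [head, bytenr Y] ...
 *	                                ^
 *	           rb_prev(head X) has bytenr X -> refs remain, keep head
 *
 * The head is only pulled out of the tree when a previous entry exists
 * and belongs to a different bytenr; if the head is leftmost, the
 * function conservatively bails out.
 */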
5544
5545 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5546                            struct btrfs_root *root,
5547                            struct extent_buffer *buf,
5548                            u64 parent, int last_ref)
5549 {
5550         struct btrfs_block_group_cache *cache = NULL;
5551         int ret;
5552
5553         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5554                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5555                                         buf->start, buf->len,
5556                                         parent, root->root_key.objectid,
5557                                         btrfs_header_level(buf),
5558                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
5559                 BUG_ON(ret); /* -ENOMEM */
5560         }
5561
5562         if (!last_ref)
5563                 return;
5564
5565         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5566
5567         if (btrfs_header_generation(buf) == trans->transid) {
5568                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5569                         ret = check_ref_cleanup(trans, root, buf->start);
5570                         if (!ret)
5571                                 goto out;
5572                 }
5573
5574                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5575                         pin_down_extent(root, cache, buf->start, buf->len, 1);
5576                         goto out;
5577                 }
5578
5579                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5580
5581                 btrfs_add_free_space(cache, buf->start, buf->len);
5582                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5583         }
5584 out:
5585         /*
5586          * Deleting the buffer, clear the corrupt flag since it doesn't matter
5587          * anymore.
5588          */
5589         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
5590         btrfs_put_block_group(cache);
5591 }
5592
5593 /* Can return -ENOMEM */
5594 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5595                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5596                       u64 owner, u64 offset, int for_cow)
5597 {
5598         int ret;
5599         struct btrfs_fs_info *fs_info = root->fs_info;
5600
5601         /*
5602          * tree log blocks never actually go into the extent allocation
5603          * tree, just update pinning info and exit early.
5604          */
5605         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
5606                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
5607                 /* unlocks the pinned mutex */
5608                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
5609                 ret = 0;
5610         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5611                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
5612                                         num_bytes,
5613                                         parent, root_objectid, (int)owner,
5614                                         BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5615         } else {
5616                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
5617                                                 num_bytes,
5618                                                 parent, root_objectid, owner,
5619                                                 offset, BTRFS_DROP_DELAYED_REF,
5620                                                 NULL, for_cow);
5621         }
5622         return ret;
5623 }
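
/*
 * Editor's usage sketch (hypothetical caller and variable names, not
 * from this file): dropping one reference to an unshared data extent.
 * The real work is queued as a delayed ref, so the call itself can only
 * fail with -ENOMEM:
 *
 *	ret = btrfs_free_extent(trans, root, disk_bytenr, disk_num_bytes,
 *				0, root->root_key.objectid,
 *				inode_objectid, file_offset, 0);
 *	if (ret)
 *		btrfs_abort_transaction(trans, root, ret);
 */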
5624
5625 static u64 stripe_align(struct btrfs_root *root,
5626                         struct btrfs_block_group_cache *cache,
5627                         u64 val, u64 num_bytes)
5628 {
5629         u64 ret = ALIGN(val, root->stripesize);
5630         return ret;
5631 }
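
/*
 * Editor's worked example (a stripesize of 0x10000 is assumed purely
 * for illustration): ALIGN() rounds up to the next stripe boundary and
 * leaves already-aligned values untouched:
 *
 *	ALIGN(0x12345, 0x10000) == 0x20000
 *	ALIGN(0x20000, 0x10000) == 0x20000
 */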
5632
5633 /*
5634  * when we wait for progress in the block group caching, it's because
5635  * our allocation attempt failed at least once.  So, we must sleep
5636  * and let some progress happen before we try again.
5637  *
5638  * This function will sleep at least once waiting for new free space to
5639  * show up, and then it will check the block group free space numbers
5640  * for our min num_bytes.  Another option is to have it go ahead
5641  * and look in the rbtree for a free extent of a given size, but this
5642  * is a good start.
5643  */
5644 static noinline int
5645 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
5646                                 u64 num_bytes)
5647 {
5648         struct btrfs_caching_control *caching_ctl;
5649
5650         caching_ctl = get_caching_control(cache);
5651         if (!caching_ctl)
5652                 return 0;
5653
5654         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
5655                    (cache->free_space_ctl->free_space >= num_bytes));
5656
5657         put_caching_control(caching_ctl);
5658         return 0;
5659 }
5660
5661 static noinline int
5662 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
5663 {
5664         struct btrfs_caching_control *caching_ctl;
5665
5666         caching_ctl = get_caching_control(cache);
5667         if (!caching_ctl)
5668                 return 0;
5669
5670         wait_event(caching_ctl->wait, block_group_cache_done(cache));
5671
5672         put_caching_control(caching_ctl);
5673         return 0;
5674 }
5675
5676 int __get_raid_index(u64 flags)
5677 {
5678         if (flags & BTRFS_BLOCK_GROUP_RAID10)
5679                 return BTRFS_RAID_RAID10;
5680         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
5681                 return BTRFS_RAID_RAID1;
5682         else if (flags & BTRFS_BLOCK_GROUP_DUP)
5683                 return BTRFS_RAID_DUP;
5684         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
5685                 return BTRFS_RAID_RAID0;
5686         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
5687                 return BTRFS_RAID_RAID5;
5688         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
5689                 return BTRFS_RAID_RAID6;
5690
5691         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
5692 }
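
/*
 * Editor's note: a block group carries at most one profile bit (none
 * means SINGLE, the fall-through case), so the if/else chain above is
 * effectively a table lookup, e.g.:
 *
 *	__get_raid_index(BTRFS_BLOCK_GROUP_DATA |
 *			 BTRFS_BLOCK_GROUP_RAID1) == BTRFS_RAID_RAID1
 *	__get_raid_index(BTRFS_BLOCK_GROUP_METADATA) == BTRFS_RAID_SINGLE
 */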
5693
5694 static int get_block_group_index(struct btrfs_block_group_cache *cache)
5695 {
5696         return __get_raid_index(cache->flags);
5697 }
5698
5699 enum btrfs_loop_type {
5700         LOOP_CACHING_NOWAIT = 0,
5701         LOOP_CACHING_WAIT = 1,
5702         LOOP_ALLOC_CHUNK = 2,
5703         LOOP_NO_EMPTY_SIZE = 3,
5704 };
5705
5706 /*
5707  * walks the btree of allocated extents and finds a hole of a given size.
5708  * The key ins is changed to record the hole:
5709  * ins->objectid == block start
5710  * ins->type == BTRFS_EXTENT_ITEM_KEY
5711  * ins->offset == number of blocks
5712  * Any available blocks before search_start are skipped.
5713  */
5714 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5715                                      struct btrfs_root *orig_root,
5716                                      u64 num_bytes, u64 empty_size,
5717                                      u64 hint_byte, struct btrfs_key *ins,
5718                                      u64 data)
5719 {
5720         int ret = 0;
5721         struct btrfs_root *root = orig_root->fs_info->extent_root;
5722         struct btrfs_free_cluster *last_ptr = NULL;
5723         struct btrfs_block_group_cache *block_group = NULL;
5724         struct btrfs_block_group_cache *used_block_group;
5725         u64 search_start = 0;
5726         int empty_cluster = 2 * 1024 * 1024;
5727         struct btrfs_space_info *space_info;
5728         int loop = 0;
5729         int index = __get_raid_index(data);
5730         int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
5731                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
5732         bool found_uncached_bg = false;
5733         bool failed_cluster_refill = false;
5734         bool failed_alloc = false;
5735         bool use_cluster = true;
5736         bool have_caching_bg = false;
5737
5738         WARN_ON(num_bytes < root->sectorsize);
5739         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
5740         ins->objectid = 0;
5741         ins->offset = 0;
5742
5743         trace_find_free_extent(orig_root, num_bytes, empty_size, data);
5744
5745         space_info = __find_space_info(root->fs_info, data);
5746         if (!space_info) {
5747                 printk(KERN_ERR "No space info for %llu\n", data);
5748                 return -ENOSPC;
5749         }
5750
5751         /*
5752          * If the space info is for both data and metadata it means we have a
5753          * small filesystem and we can't use the clustering stuff.
5754          */
5755         if (btrfs_mixed_space_info(space_info))
5756                 use_cluster = false;
5757
5758         if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
5759                 last_ptr = &root->fs_info->meta_alloc_cluster;
5760                 if (!btrfs_test_opt(root, SSD))
5761                         empty_cluster = 64 * 1024;
5762         }
5763
5764         if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
5765             btrfs_test_opt(root, SSD)) {
5766                 last_ptr = &root->fs_info->data_alloc_cluster;
5767         }
5768
5769         if (last_ptr) {
5770                 spin_lock(&last_ptr->lock);
5771                 if (last_ptr->block_group)
5772                         hint_byte = last_ptr->window_start;
5773                 spin_unlock(&last_ptr->lock);
5774         }
5775
5776         search_start = max(search_start, first_logical_byte(root, 0));
5777         search_start = max(search_start, hint_byte);
5778
5779         if (!last_ptr)
5780                 empty_cluster = 0;
5781
5782         if (search_start == hint_byte) {
5783                 block_group = btrfs_lookup_block_group(root->fs_info,
5784                                                        search_start);
5785                 used_block_group = block_group;
5786                 /*
5787                  * we don't want to use the block group if it doesn't match our
5788                  * allocation bits, or if its not cached.
5789                  * allocation bits, or if it's not cached.
5790                  *
5791                  * However, if we are re-searching with an ideal block group
5792                  */
5793                 if (block_group && block_group_bits(block_group, data) &&
5794                     block_group->cached != BTRFS_CACHE_NO) {
5795                         down_read(&space_info->groups_sem);
5796                         if (list_empty(&block_group->list) ||
5797                             block_group->ro) {
5798                                 /*
5799                                  * someone is removing this block group,
5800                                  * we can't jump into the have_block_group
5801                                  * target because our list pointers are not
5802                                  * valid
5803                                  */
5804                                 btrfs_put_block_group(block_group);
5805                                 up_read(&space_info->groups_sem);
5806                         } else {
5807                                 index = get_block_group_index(block_group);
5808                                 goto have_block_group;
5809                         }
5810                 } else if (block_group) {
5811                         btrfs_put_block_group(block_group);
5812                 }
5813         }
5814 search:
5815         have_caching_bg = false;
5816         down_read(&space_info->groups_sem);
5817         list_for_each_entry(block_group, &space_info->block_groups[index],
5818                             list) {
5819                 u64 offset;
5820                 int cached;
5821
5822                 used_block_group = block_group;
5823                 btrfs_get_block_group(block_group);
5824                 search_start = block_group->key.objectid;
5825
5826                 /*
5827                  * this can happen if we end up cycling through all the
5828                  * raid types, but we want to make sure we only allocate
5829                  * for the proper type.
5830                  */
5831                 if (!block_group_bits(block_group, data)) {
5832                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
5833                                     BTRFS_BLOCK_GROUP_RAID1 |
5834                                     BTRFS_BLOCK_GROUP_RAID5 |
5835                                     BTRFS_BLOCK_GROUP_RAID6 |
5836                                     BTRFS_BLOCK_GROUP_RAID10;
5837
5838                         /*
5839                          * if they asked for extra copies and this block group
5840                          * doesn't provide them, bail.  This does allow us to
5841                          * fill raid0 from raid1.
5842                          */
5843                         if ((data & extra) && !(block_group->flags & extra))
5844                                 goto loop;
5845                 }
5846
5847 have_block_group:
5848                 cached = block_group_cache_done(block_group);
5849                 if (unlikely(!cached)) {
5850                         found_uncached_bg = true;
5851                         ret = cache_block_group(block_group, 0);
5852                         BUG_ON(ret < 0);
5853                         ret = 0;
5854                 }
5855
5856                 if (unlikely(block_group->ro))
5857                         goto loop;
5858
5859                 /*
5860                  * OK, we want to try to use the cluster allocator, so
5861                  * let's look there
5862                  */
5863                 if (last_ptr) {
5864                         unsigned long aligned_cluster;
5865                         /*
5866                          * the refill lock keeps out other
5867                          * people trying to start a new cluster
5868                          */
5869                         spin_lock(&last_ptr->refill_lock);
5870                         used_block_group = last_ptr->block_group;
5871                         if (used_block_group != block_group &&
5872                             (!used_block_group ||
5873                              used_block_group->ro ||
5874                              !block_group_bits(used_block_group, data))) {
5875                                 used_block_group = block_group;
5876                                 goto refill_cluster;
5877                         }
5878
5879                         if (used_block_group != block_group)
5880                                 btrfs_get_block_group(used_block_group);
5881
5882                         offset = btrfs_alloc_from_cluster(used_block_group,
5883                           last_ptr, num_bytes, used_block_group->key.objectid);
5884                         if (offset) {
5885                                 /* we have a block, we're done */
5886                                 spin_unlock(&last_ptr->refill_lock);
5887                                 trace_btrfs_reserve_extent_cluster(root,
5888                                         block_group, search_start, num_bytes);
5889                                 goto checks;
5890                         }
5891
5892                         WARN_ON(last_ptr->block_group != used_block_group);
5893                         if (used_block_group != block_group) {
5894                                 btrfs_put_block_group(used_block_group);
5895                                 used_block_group = block_group;
5896                         }
5897 refill_cluster:
5898                         BUG_ON(used_block_group != block_group);
5899                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
5900                          * set up a new cluster, so let's just skip it
5901                          * and let the allocator find whatever block
5902                          * it can find.  If we reach this point, we
5903                          * will have tried the cluster allocator
5904                          * plenty of times and not have found
5905                          * anything, so we are likely way too
5906                          * fragmented for the clustering stuff to find
5907                          * anything.
5908                          *
5909                          * However, if the cluster is taken from the
5910                          * current block group, release the cluster
5911                          * first, so that we stand a better chance of
5912                          * succeeding in the unclustered
5913                          * allocation.  */
5914                         if (loop >= LOOP_NO_EMPTY_SIZE &&
5915                             last_ptr->block_group != block_group) {
5916                                 spin_unlock(&last_ptr->refill_lock);
5917                                 goto unclustered_alloc;
5918                         }
5919
5920                         /*
5921                          * this cluster didn't work out, free it and
5922                          * start over
5923                          */
5924                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5925
5926                         if (loop >= LOOP_NO_EMPTY_SIZE) {
5927                                 spin_unlock(&last_ptr->refill_lock);
5928                                 goto unclustered_alloc;
5929                         }
5930
5931                         aligned_cluster = max_t(unsigned long,
5932                                                 empty_cluster + empty_size,
5933                                               block_group->full_stripe_len);
5934
5935                         /* allocate a cluster in this block group */
5936                         ret = btrfs_find_space_cluster(trans, root,
5937                                                block_group, last_ptr,
5938                                                search_start, num_bytes,
5939                                                aligned_cluster);
5940                         if (ret == 0) {
5941                                 /*
5942                                  * now pull our allocation out of this
5943                                  * cluster
5944                                  */
5945                                 offset = btrfs_alloc_from_cluster(block_group,
5946                                                   last_ptr, num_bytes,
5947                                                   search_start);
5948                                 if (offset) {
5949                                         /* we found one, proceed */
5950                                         spin_unlock(&last_ptr->refill_lock);
5951                                         trace_btrfs_reserve_extent_cluster(root,
5952                                                 block_group, search_start,
5953                                                 num_bytes);
5954                                         goto checks;
5955                                 }
5956                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
5957                                    && !failed_cluster_refill) {
5958                                 spin_unlock(&last_ptr->refill_lock);
5959
5960                                 failed_cluster_refill = true;
5961                                 wait_block_group_cache_progress(block_group,
5962                                        num_bytes + empty_cluster + empty_size);
5963                                 goto have_block_group;
5964                         }
5965
5966                         /*
5967                          * at this point we either didn't find a cluster
5968                          * or we weren't able to allocate a block from our
5969                          * cluster.  Free the cluster we've been trying
5970                          * to use, and go to the next block group
5971                          */
5972                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5973                         spin_unlock(&last_ptr->refill_lock);
5974                         goto loop;
5975                 }
5976
5977 unclustered_alloc:
5978                 spin_lock(&block_group->free_space_ctl->tree_lock);
5979                 if (cached &&
5980                     block_group->free_space_ctl->free_space <
5981                     num_bytes + empty_cluster + empty_size) {
5982                         spin_unlock(&block_group->free_space_ctl->tree_lock);
5983                         goto loop;
5984                 }
5985                 spin_unlock(&block_group->free_space_ctl->tree_lock);
5986
5987                 offset = btrfs_find_space_for_alloc(block_group, search_start,
5988                                                     num_bytes, empty_size);
5989                 /*
5990                  * If we didn't find a chunk, and we haven't failed on this
5991                  * block group before, and this block group is in the middle of
5992                  * caching and we are ok with waiting, then go ahead and wait
5993                  * for progress to be made, and set failed_alloc to true.
5994                  *
5995                  * If failed_alloc is true then we've already waited on this
5996                  * block group once and should move on to the next block group.
5997                  */
5998                 if (!offset && !failed_alloc && !cached &&
5999                     loop > LOOP_CACHING_NOWAIT) {
6000                         wait_block_group_cache_progress(block_group,
6001                                                 num_bytes + empty_size);
6002                         failed_alloc = true;
6003                         goto have_block_group;
6004                 } else if (!offset) {
6005                         if (!cached)
6006                                 have_caching_bg = true;
6007                         goto loop;
6008                 }
6009 checks:
6010                 search_start = stripe_align(root, used_block_group,
6011                                             offset, num_bytes);
6012
6013                 /* move on to the next group */
6014                 if (search_start + num_bytes >
6015                     used_block_group->key.objectid + used_block_group->key.offset) {
6016                         btrfs_add_free_space(used_block_group, offset, num_bytes);
6017                         goto loop;
6018                 }
6019
6020                 if (offset < search_start)
6021                         btrfs_add_free_space(used_block_group, offset,
6022                                              search_start - offset);
6023                 BUG_ON(offset > search_start);
6024
6025                 ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
6026                                                   alloc_type);
6027                 if (ret == -EAGAIN) {
6028                         btrfs_add_free_space(used_block_group, offset, num_bytes);
6029                         goto loop;
6030                 }
6031
6032                 /* we are all good, let's return */
6033                 ins->objectid = search_start;
6034                 ins->offset = num_bytes;
6035
6036                 trace_btrfs_reserve_extent(orig_root, block_group,
6037                                            search_start, num_bytes);
6038                 if (used_block_group != block_group)
6039                         btrfs_put_block_group(used_block_group);
6040                 btrfs_put_block_group(block_group);
6041                 break;
6042 loop:
6043                 failed_cluster_refill = false;
6044                 failed_alloc = false;
6045                 BUG_ON(index != get_block_group_index(block_group));
6046                 if (used_block_group != block_group)
6047                         btrfs_put_block_group(used_block_group);
6048                 btrfs_put_block_group(block_group);
6049         }
6050         up_read(&space_info->groups_sem);
6051
6052         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6053                 goto search;
6054
6055         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6056                 goto search;
6057
6058         /*
6059          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6060          *                      caching kthreads as we move along
6061          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6062          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6063          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
6064          *                      again
6065          */
6066         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6067                 index = 0;
6068                 loop++;
6069                 if (loop == LOOP_ALLOC_CHUNK) {
6070                         ret = do_chunk_alloc(trans, root, data,
6071                                              CHUNK_ALLOC_FORCE);
6072                         /*
6073                          * Do not bail out on ENOSPC since we
6074                          * can do more things.
6075                          */
6076                         if (ret < 0 && ret != -ENOSPC) {
6077                                 btrfs_abort_transaction(trans,
6078                                                         root, ret);
6079                                 goto out;
6080                         }
6081                 }
6082
6083                 if (loop == LOOP_NO_EMPTY_SIZE) {
6084                         empty_size = 0;
6085                         empty_cluster = 0;
6086                 }
6087
6088                 goto search;
6089         } else if (!ins->objectid) {
6090                 ret = -ENOSPC;
6091         } else if (ins->objectid) {
6092                 ret = 0;
6093         }
6094 out:
6095
6096         return ret;
6097 }
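
/*
 * Editor's summary of the retry ladder above (restating the loop
 * comment in control-flow form): each time a full pass over every raid
 * index finds nothing, loop escalates one stage and the pass restarts
 * at index 0:
 *
 *	LOOP_CACHING_NOWAIT   use only block groups that are already cached
 *	LOOP_CACHING_WAIT     also wait for caching to make progress
 *	LOOP_ALLOC_CHUNK      force-allocate a fresh chunk, then retry
 *	LOOP_NO_EMPTY_SIZE    retry with empty_size and empty_cluster zeroed
 *
 * -ENOSPC is returned only after LOOP_NO_EMPTY_SIZE fails as well.
 */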
6098
6099 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6100                             int dump_block_groups)
6101 {
6102         struct btrfs_block_group_cache *cache;
6103         int index = 0;
6104
6105         spin_lock(&info->lock);
6106         printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
6107                (unsigned long long)info->flags,
6108                (unsigned long long)(info->total_bytes - info->bytes_used -
6109                                     info->bytes_pinned - info->bytes_reserved -
6110                                     info->bytes_readonly),
6111                (info->full) ? "" : "not ");
6112         printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
6113                "reserved=%llu, may_use=%llu, readonly=%llu\n",
6114                (unsigned long long)info->total_bytes,
6115                (unsigned long long)info->bytes_used,
6116                (unsigned long long)info->bytes_pinned,
6117                (unsigned long long)info->bytes_reserved,
6118                (unsigned long long)info->bytes_may_use,
6119                (unsigned long long)info->bytes_readonly);
6120         spin_unlock(&info->lock);
6121
6122         if (!dump_block_groups)
6123                 return;
6124
6125         down_read(&info->groups_sem);
6126 again:
6127         list_for_each_entry(cache, &info->block_groups[index], list) {
6128                 spin_lock(&cache->lock);
6129                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
6130                        (unsigned long long)cache->key.objectid,
6131                        (unsigned long long)cache->key.offset,
6132                        (unsigned long long)btrfs_block_group_used(&cache->item),
6133                        (unsigned long long)cache->pinned,
6134                        (unsigned long long)cache->reserved,
6135                        cache->ro ? "[readonly]" : "");
6136                 btrfs_dump_free_space(cache, bytes);
6137                 spin_unlock(&cache->lock);
6138         }
6139         if (++index < BTRFS_NR_RAID_TYPES)
6140                 goto again;
6141         up_read(&info->groups_sem);
6142 }
6143
6144 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
6145                          struct btrfs_root *root,
6146                          u64 num_bytes, u64 min_alloc_size,
6147                          u64 empty_size, u64 hint_byte,
6148                          struct btrfs_key *ins, u64 data)
6149 {
6150         bool final_tried = false;
6151         int ret;
6152
6153         data = btrfs_get_alloc_profile(root, data);
6154 again:
6155         WARN_ON(num_bytes < root->sectorsize);
6156         ret = find_free_extent(trans, root, num_bytes, empty_size,
6157                                hint_byte, ins, data);
6158
6159         if (ret == -ENOSPC) {
6160                 if (!final_tried) {
6161                         num_bytes = num_bytes >> 1;
6162                         num_bytes = round_down(num_bytes, root->sectorsize);
6163                         num_bytes = max(num_bytes, min_alloc_size);
6164                         if (num_bytes == min_alloc_size)
6165                                 final_tried = true;
6166                         goto again;
6167                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6168                         struct btrfs_space_info *sinfo;
6169
6170                         sinfo = __find_space_info(root->fs_info, data);
6171                         printk(KERN_ERR "btrfs allocation failed flags %llu, "
6172                                "wanted %llu\n", (unsigned long long)data,
6173                                (unsigned long long)num_bytes);
6174                         if (sinfo)
6175                                 dump_space_info(sinfo, num_bytes, 1);
6176                 }
6177         }
6178
6179         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
6180
6181         return ret;
6182 }
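
/*
 * Editor's worked example of the -ENOSPC fallback above (a 4K
 * sectorsize is assumed): a 1M request with min_alloc_size of 256K is
 * halved, rounded down to the sector size, and clamped to the minimum
 * on every retry; the minimum itself is tried exactly once:
 *
 *	1M -> 512K -> 256K (final_tried set, last attempt)
 */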
6183
6184 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6185                                         u64 start, u64 len, int pin)
6186 {
6187         struct btrfs_block_group_cache *cache;
6188         int ret = 0;
6189
6190         cache = btrfs_lookup_block_group(root->fs_info, start);
6191         if (!cache) {
6192                 printk(KERN_ERR "Unable to find block group for %llu\n",
6193                        (unsigned long long)start);
6194                 return -ENOSPC;
6195         }
6196
6197         if (btrfs_test_opt(root, DISCARD))
6198                 ret = btrfs_discard_extent(root, start, len, NULL);
6199
6200         if (pin)
6201                 pin_down_extent(root, cache, start, len, 1);
6202         else {
6203                 btrfs_add_free_space(cache, start, len);
6204                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
6205         }
6206         btrfs_put_block_group(cache);
6207
6208         trace_btrfs_reserved_extent_free(root, start, len);
6209
6210         return ret;
6211 }
6212
6213 int btrfs_free_reserved_extent(struct btrfs_root *root,
6214                                         u64 start, u64 len)
6215 {
6216         return __btrfs_free_reserved_extent(root, start, len, 0);
6217 }
6218
6219 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6220                                        u64 start, u64 len)
6221 {
6222         return __btrfs_free_reserved_extent(root, start, len, 1);
6223 }
6224
6225 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6226                                       struct btrfs_root *root,
6227                                       u64 parent, u64 root_objectid,
6228                                       u64 flags, u64 owner, u64 offset,
6229                                       struct btrfs_key *ins, int ref_mod)
6230 {
6231         int ret;
6232         struct btrfs_fs_info *fs_info = root->fs_info;
6233         struct btrfs_extent_item *extent_item;
6234         struct btrfs_extent_inline_ref *iref;
6235         struct btrfs_path *path;
6236         struct extent_buffer *leaf;
6237         int type;
6238         u32 size;
6239
6240         if (parent > 0)
6241                 type = BTRFS_SHARED_DATA_REF_KEY;
6242         else
6243                 type = BTRFS_EXTENT_DATA_REF_KEY;
6244
6245         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6246
6247         path = btrfs_alloc_path();
6248         if (!path)
6249                 return -ENOMEM;
6250
6251         path->leave_spinning = 1;
6252         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6253                                       ins, size);
6254         if (ret) {
6255                 btrfs_free_path(path);
6256                 return ret;
6257         }
6258
6259         leaf = path->nodes[0];
6260         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6261                                      struct btrfs_extent_item);
6262         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
6263         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6264         btrfs_set_extent_flags(leaf, extent_item,
6265                                flags | BTRFS_EXTENT_FLAG_DATA);
6266
6267         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6268         btrfs_set_extent_inline_ref_type(leaf, iref, type);
6269         if (parent > 0) {
6270                 struct btrfs_shared_data_ref *ref;
6271                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
6272                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6273                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
6274         } else {
6275                 struct btrfs_extent_data_ref *ref;
6276                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
6277                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
6278                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
6279                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
6280                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
6281         }
6282
6283         btrfs_mark_buffer_dirty(path->nodes[0]);
6284         btrfs_free_path(path);
6285
6286         ret = update_block_group(root, ins->objectid, ins->offset, 1);
6287         if (ret) { /* -ENOENT, logic error */
6288                 printk(KERN_ERR "btrfs update block group failed for %llu "
6289                        "%llu\n", (unsigned long long)ins->objectid,
6290                        (unsigned long long)ins->offset);
6291                 BUG();
6292         }
6293         return ret;
6294 }
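
/*
 * Editor's illustration of the leaf item written above (layout only,
 * not from the original source): the inline ref immediately follows
 * the extent item:
 *
 *	parent > 0 (shared extent):
 *	  [btrfs_extent_item][iref: SHARED_DATA_REF_KEY, offset = parent]
 *	  [btrfs_shared_data_ref: count = ref_mod]
 *
 *	parent == 0:
 *	  [btrfs_extent_item][iref: EXTENT_DATA_REF_KEY]
 *	  [btrfs_extent_data_ref: root, objectid, offset, count = ref_mod]
 */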
6295
6296 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6297                                      struct btrfs_root *root,
6298                                      u64 parent, u64 root_objectid,
6299                                      u64 flags, struct btrfs_disk_key *key,
6300                                      int level, struct btrfs_key *ins)
6301 {
6302         int ret;
6303         struct btrfs_fs_info *fs_info = root->fs_info;
6304         struct btrfs_extent_item *extent_item;
6305         struct btrfs_tree_block_info *block_info;
6306         struct btrfs_extent_inline_ref *iref;
6307         struct btrfs_path *path;
6308         struct extent_buffer *leaf;
6309         u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
6310
6311         path = btrfs_alloc_path();
6312         if (!path)
6313                 return -ENOMEM;
6314
6315         path->leave_spinning = 1;
6316         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6317                                       ins, size);
6318         if (ret) {
6319                 btrfs_free_path(path);
6320                 return ret;
6321         }
6322
6323         leaf = path->nodes[0];
6324         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6325                                      struct btrfs_extent_item);
6326         btrfs_set_extent_refs(leaf, extent_item, 1);
6327         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6328         btrfs_set_extent_flags(leaf, extent_item,
6329                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
6330         block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
6331
6332         btrfs_set_tree_block_key(leaf, block_info, key);
6333         btrfs_set_tree_block_level(leaf, block_info, level);
6334
6335         iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
6336         if (parent > 0) {
6337                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
6338                 btrfs_set_extent_inline_ref_type(leaf, iref,
6339                                                  BTRFS_SHARED_BLOCK_REF_KEY);
6340                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6341         } else {
6342                 btrfs_set_extent_inline_ref_type(leaf, iref,
6343                                                  BTRFS_TREE_BLOCK_REF_KEY);
6344                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6345         }
6346
6347         btrfs_mark_buffer_dirty(leaf);
6348         btrfs_free_path(path);
6349
6350         ret = update_block_group(root, ins->objectid, ins->offset, 1);
6351         if (ret) { /* -ENOENT, logic error */
6352                 printk(KERN_ERR "btrfs update block group failed for %llu "
6353                        "%llu\n", (unsigned long long)ins->objectid,
6354                        (unsigned long long)ins->offset);
6355                 BUG();
6356         }
6357         return ret;
6358 }
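
/*
 * Editor's illustration, mirroring the data case above: for a tree
 * block a btrfs_tree_block_info (key + level) sits between the extent
 * item and the single inline ref:
 *
 *	[btrfs_extent_item][btrfs_tree_block_info]
 *	[iref: SHARED_BLOCK_REF_KEY/parent or TREE_BLOCK_REF_KEY/root_objectid]
 */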
6359
6360 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6361                                      struct btrfs_root *root,
6362                                      u64 root_objectid, u64 owner,
6363                                      u64 offset, struct btrfs_key *ins)
6364 {
6365         int ret;
6366
6367         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6368
6369         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6370                                          ins->offset, 0,
6371                                          root_objectid, owner, offset,
6372                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6373         return ret;
6374 }
6375
6376 /*
6377  * this is used by the tree logging recovery code.  It records that
6378  * an extent has been allocated and makes sure to clear the free
6379  * space cache bits as well
6380  */
6381 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6382                                    struct btrfs_root *root,
6383                                    u64 root_objectid, u64 owner, u64 offset,
6384                                    struct btrfs_key *ins)
6385 {
6386         int ret;
6387         struct btrfs_block_group_cache *block_group;
6388         struct btrfs_caching_control *caching_ctl;
6389         u64 start = ins->objectid;
6390         u64 num_bytes = ins->offset;
6391
6392         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6393         cache_block_group(block_group, 0);
6394         caching_ctl = get_caching_control(block_group);
6395
6396         if (!caching_ctl) {
6397                 BUG_ON(!block_group_cache_done(block_group));
6398                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6399                 BUG_ON(ret); /* -ENOMEM */
6400         } else {
6401                 mutex_lock(&caching_ctl->mutex);
6402
6403                 if (start >= caching_ctl->progress) {
6404                         ret = add_excluded_extent(root, start, num_bytes);
6405                         BUG_ON(ret); /* -ENOMEM */
6406                 } else if (start + num_bytes <= caching_ctl->progress) {
6407                         ret = btrfs_remove_free_space(block_group,
6408                                                       start, num_bytes);
6409                         BUG_ON(ret); /* -ENOMEM */
6410                 } else {
6411                         num_bytes = caching_ctl->progress - start;
6412                         ret = btrfs_remove_free_space(block_group,
6413                                                       start, num_bytes);
6414                         BUG_ON(ret); /* -ENOMEM */
6415
6416                         start = caching_ctl->progress;
6417                         num_bytes = ins->objectid + ins->offset -
6418                                     caching_ctl->progress;
6419                         ret = add_excluded_extent(root, start, num_bytes);
6420                         BUG_ON(ret); /* -ENOMEM */
6421                 }
6422
6423                 mutex_unlock(&caching_ctl->mutex);
6424                 put_caching_control(caching_ctl);
6425         }
6426
6427         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6428                                           RESERVE_ALLOC_NO_ACCOUNT);
6429         BUG_ON(ret); /* logic error */
6430         btrfs_put_block_group(block_group);
6431         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6432                                          0, owner, offset, ins, 1);
6433         return ret;
6434 }
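
/*
 * Editor's worked example of the caching_ctl->progress cases above for
 * a logged extent [start, start + num_bytes):
 *
 *	progress <= start                     nothing cached yet: exclude
 *	                                      the whole range
 *	progress >= start + num_bytes         fully cached: remove it from
 *	                                      the free space cache
 *	start < progress < start + num_bytes  split at progress: remove the
 *	                                      cached part, exclude the rest
 */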
6435
6436 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
6437                                             struct btrfs_root *root,
6438                                             u64 bytenr, u32 blocksize,
6439                                             int level)
6440 {
6441         struct extent_buffer *buf;
6442
6443         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6444         if (!buf)
6445                 return ERR_PTR(-ENOMEM);
6446         btrfs_set_header_generation(buf, trans->transid);
6447         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6448         btrfs_tree_lock(buf);
6449         clean_tree_block(trans, root, buf);
6450         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6451
6452         btrfs_set_lock_blocking(buf);
6453         btrfs_set_buffer_uptodate(buf);
6454
6455         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6456                 /*
6457                  * we allow two log transactions at a time, so use a
6458                  * different EXTENT bit to differentiate dirty pages.
6459                  */
6460                 if (root->log_transid % 2 == 0)
6461                         set_extent_dirty(&root->dirty_log_pages, buf->start,
6462                                         buf->start + buf->len - 1, GFP_NOFS);
6463                 else
6464                         set_extent_new(&root->dirty_log_pages, buf->start,
6465                                         buf->start + buf->len - 1, GFP_NOFS);
6466         } else {
6467                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6468                          buf->start + buf->len - 1, GFP_NOFS);
6469         }
6470         trans->blocks_used++;
6471         /* this returns a buffer locked for blocking */
6472         return buf;
6473 }
6474
6475 static struct btrfs_block_rsv *
6476 use_block_rsv(struct btrfs_trans_handle *trans,
6477               struct btrfs_root *root, u32 blocksize)
6478 {
6479         struct btrfs_block_rsv *block_rsv;
6480         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6481         int ret;
6482
6483         block_rsv = get_block_rsv(trans, root);
6484
6485         if (block_rsv->size == 0) {
6486                 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6487                                              BTRFS_RESERVE_NO_FLUSH);
6488                 /*
6489                  * If we couldn't reserve metadata bytes try and use some from
6490                  * the global reserve.
6491                  */
6492                 if (ret && block_rsv != global_rsv) {
6493                         ret = block_rsv_use_bytes(global_rsv, blocksize);
6494                         if (!ret)
6495                                 return global_rsv;
6496                         return ERR_PTR(ret);
6497                 } else if (ret) {
6498                         return ERR_PTR(ret);
6499                 }
6500                 return block_rsv;
6501         }
6502
6503         ret = block_rsv_use_bytes(block_rsv, blocksize);
6504         if (!ret)
6505                 return block_rsv;
6506         if (ret && !block_rsv->failfast) {
6507                 if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6508                         static DEFINE_RATELIMIT_STATE(_rs,
6509                                         DEFAULT_RATELIMIT_INTERVAL * 10,
6510                                         /*DEFAULT_RATELIMIT_BURST*/ 1);
6511                         if (__ratelimit(&_rs))
6512                                 WARN(1, KERN_DEBUG
6513                                         "btrfs: block rsv returned %d\n", ret);
6514                 }
6515                 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6516                                              BTRFS_RESERVE_NO_FLUSH);
6517                 if (!ret) {
6518                         return block_rsv;
6519                 } else if (ret && block_rsv != global_rsv) {
6520                         ret = block_rsv_use_bytes(global_rsv, blocksize);
6521                         if (!ret)
6522                                 return global_rsv;
6523                 }
6524         }
6525
6526         return ERR_PTR(-ENOSPC);
6527 }
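
/*
 * Editor's sketch of the fallback order implemented above, for a block
 * reservation that already has a size:
 *
 *	block_rsv_use_bytes(block_rsv)      ok? -> use block_rsv
 *	reserve_metadata_bytes(NO_FLUSH)    ok? -> use block_rsv
 *	block_rsv_use_bytes(global_rsv)     ok? -> use global_rsv
 *	otherwise                           -> ERR_PTR(-ENOSPC)
 *
 * The rate-limited WARN only fires with the ENOSPC_DEBUG mount option.
 */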
6528
6529 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
6530                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
6531 {
6532         block_rsv_add_bytes(block_rsv, blocksize, 0);
6533         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
6534 }
6535
6536 /*
6537  * finds a free extent and does all the dirty work required for allocation.
6538  * It returns the key for the extent through ins, and a tree buffer for
6539  * the first block of the extent through buf.
6540  *
6541  * returns the tree buffer on success, or an ERR_PTR on failure.
6542  */
6543 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6544                                         struct btrfs_root *root, u32 blocksize,
6545                                         u64 parent, u64 root_objectid,
6546                                         struct btrfs_disk_key *key, int level,
6547                                         u64 hint, u64 empty_size)
6548 {
6549         struct btrfs_key ins;
6550         struct btrfs_block_rsv *block_rsv;
6551         struct extent_buffer *buf;
6552         u64 flags = 0;
6553         int ret;
6554
6555
6556         block_rsv = use_block_rsv(trans, root, blocksize);
6557         if (IS_ERR(block_rsv))
6558                 return ERR_CAST(block_rsv);
6559
6560         ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
6561                                    empty_size, hint, &ins, 0);
6562         if (ret) {
6563                 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
6564                 return ERR_PTR(ret);
6565         }
6566
6567         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
6568                                     blocksize, level);
6569         BUG_ON(IS_ERR(buf)); /* -ENOMEM */
6570
6571         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
6572                 if (parent == 0)
6573                         parent = ins.objectid;
6574                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6575         } else
6576                 BUG_ON(parent > 0);
6577
6578         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
6579                 struct btrfs_delayed_extent_op *extent_op;
6580                 extent_op = btrfs_alloc_delayed_extent_op();
6581                 BUG_ON(!extent_op); /* -ENOMEM */
6582                 if (key)
6583                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
6584                 else
6585                         memset(&extent_op->key, 0, sizeof(extent_op->key));
6586                 extent_op->flags_to_set = flags;
6587                 extent_op->update_key = 1;
6588                 extent_op->update_flags = 1;
6589                 extent_op->is_data = 0;
6590
6591                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6592                                         ins.objectid,
6593                                         ins.offset, parent, root_objectid,
6594                                         level, BTRFS_ADD_DELAYED_EXTENT,
6595                                         extent_op, 0);
6596                 BUG_ON(ret); /* -ENOMEM */
6597         }
6598         return buf;
6599 }
6600
6601 struct walk_control {
6602         u64 refs[BTRFS_MAX_LEVEL];
6603         u64 flags[BTRFS_MAX_LEVEL];
6604         struct btrfs_key update_progress;
6605         int stage;
6606         int level;
6607         int shared_level;
6608         int update_ref;
6609         int keep_locks;
6610         int reada_slot;
6611         int reada_count;
6612         int for_reloc;
6613 };
6614
6615 #define DROP_REFERENCE  1
6616 #define UPDATE_BACKREF  2
6617
6618 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
6619                                      struct btrfs_root *root,
6620                                      struct walk_control *wc,
6621                                      struct btrfs_path *path)
6622 {
6623         u64 bytenr;
6624         u64 generation;
6625         u64 refs;
6626         u64 flags;
6627         u32 nritems;
6628         u32 blocksize;
6629         struct btrfs_key key;
6630         struct extent_buffer *eb;
6631         int ret;
6632         int slot;
6633         int nread = 0;
6634
6635         if (path->slots[wc->level] < wc->reada_slot) {
6636                 wc->reada_count = wc->reada_count * 2 / 3;
6637                 wc->reada_count = max(wc->reada_count, 2);
6638         } else {
6639                 wc->reada_count = wc->reada_count * 3 / 2;
6640                 wc->reada_count = min_t(int, wc->reada_count,
6641                                         BTRFS_NODEPTRS_PER_BLOCK(root));
6642         }
6643
6644         eb = path->nodes[wc->level];
6645         nritems = btrfs_header_nritems(eb);
6646         blocksize = btrfs_level_size(root, wc->level - 1);
6647
6648         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
6649                 if (nread >= wc->reada_count)
6650                         break;
6651
6652                 cond_resched();
6653                 bytenr = btrfs_node_blockptr(eb, slot);
6654                 generation = btrfs_node_ptr_generation(eb, slot);
6655
6656                 if (slot == path->slots[wc->level])
6657                         goto reada;
6658
6659                 if (wc->stage == UPDATE_BACKREF &&
6660                     generation <= root->root_key.offset)
6661                         continue;
6662
6663                 /* We don't lock the tree block, it's OK to be racy here */
6664                 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6665                                                &refs, &flags);
6666                 /* We don't care about errors in readahead. */
6667                 if (ret < 0)
6668                         continue;
6669                 BUG_ON(refs == 0);
6670
6671                 if (wc->stage == DROP_REFERENCE) {
6672                         if (refs == 1)
6673                                 goto reada;
6674
6675                         if (wc->level == 1 &&
6676                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6677                                 continue;
6678                         if (!wc->update_ref ||
6679                             generation <= root->root_key.offset)
6680                                 continue;
6681                         btrfs_node_key_to_cpu(eb, &key, slot);
6682                         ret = btrfs_comp_cpu_keys(&key,
6683                                                   &wc->update_progress);
6684                         if (ret < 0)
6685                                 continue;
6686                 } else {
6687                         if (wc->level == 1 &&
6688                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6689                                 continue;
6690                 }
6691 reada:
6692                 ret = readahead_tree_block(root, bytenr, blocksize,
6693                                            generation);
6694                 if (ret)
6695                         break;
6696                 nread++;
6697         }
6698         wc->reada_slot = slot;
6699 }
6700
6701 /*
6702  * helper to process tree block while walking down the tree.
6703  *
6704  * when wc->stage == UPDATE_BACKREF, this function updates
6705  * back refs for pointers in the block.
6706  *
6707  * NOTE: return value 1 means we should stop walking down.
6708  */
6709 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
6710                                    struct btrfs_root *root,
6711                                    struct btrfs_path *path,
6712                                    struct walk_control *wc, int lookup_info)
6713 {
6714         int level = wc->level;
6715         struct extent_buffer *eb = path->nodes[level];
6716         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6717         int ret;
6718
6719         if (wc->stage == UPDATE_BACKREF &&
6720             btrfs_header_owner(eb) != root->root_key.objectid)
6721                 return 1;
6722
6723         /*
6724          * when the reference count of a tree block is 1, it won't increase
6725          * again. once the full backref flag is set, we never clear it.
6726          */
6727         if (lookup_info &&
6728             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
6729              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
6730                 BUG_ON(!path->locks[level]);
6731                 ret = btrfs_lookup_extent_info(trans, root,
6732                                                eb->start, eb->len,
6733                                                &wc->refs[level],
6734                                                &wc->flags[level]);
6735                 BUG_ON(ret == -ENOMEM);
6736                 if (ret)
6737                         return ret;
6738                 BUG_ON(wc->refs[level] == 0);
6739         }
6740
6741         if (wc->stage == DROP_REFERENCE) {
6742                 if (wc->refs[level] > 1)
6743                         return 1;
6744
6745                 if (path->locks[level] && !wc->keep_locks) {
6746                         btrfs_tree_unlock_rw(eb, path->locks[level]);
6747                         path->locks[level] = 0;
6748                 }
6749                 return 0;
6750         }
6751
6752         /* wc->stage == UPDATE_BACKREF */
6753         if (!(wc->flags[level] & flag)) {
6754                 BUG_ON(!path->locks[level]);
6755                 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
6756                 BUG_ON(ret); /* -ENOMEM */
6757                 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
6758                 BUG_ON(ret); /* -ENOMEM */
6759                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
6760                                                   eb->len, flag, 0);
6761                 BUG_ON(ret); /* -ENOMEM */
6762                 wc->flags[level] |= flag;
6763         }
6764
6765         /*
6766          * the block is shared by multiple trees, so it's not good to
6767          * keep the tree lock
6768          */
6769         if (path->locks[level] && level > 0) {
6770                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6771                 path->locks[level] = 0;
6772         }
6773         return 0;
6774 }
6775
6776 /*
6777  * helper to process tree block pointer.
6778  *
6779  * when wc->stage == DROP_REFERENCE, this function checks
6780  * reference count of the block pointed to. if the block
6781  * is shared and we need to update back refs for the subtree
6782  * rooted at the block, this function changes wc->stage to
6783  * UPDATE_BACKREF. if the block is shared and there is no
6784  * need to update back refs, this function drops the reference
6785  * to the block.
6786  *
6787  * NOTE: return value 1 means we should stop walking down.
6788  */
6789 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
6790                                  struct btrfs_root *root,
6791                                  struct btrfs_path *path,
6792                                  struct walk_control *wc, int *lookup_info)
6793 {
6794         u64 bytenr;
6795         u64 generation;
6796         u64 parent;
6797         u32 blocksize;
6798         struct btrfs_key key;
6799         struct extent_buffer *next;
6800         int level = wc->level;
6801         int reada = 0;
6802         int ret = 0;
6803
6804         generation = btrfs_node_ptr_generation(path->nodes[level],
6805                                                path->slots[level]);
6806         /*
6807          * if the lower level block was created before the snapshot
6808          * was created, we know there is no need to update back refs
6809          * for the subtree
6810          */
6811         if (wc->stage == UPDATE_BACKREF &&
6812             generation <= root->root_key.offset) {
6813                 *lookup_info = 1;
6814                 return 1;
6815         }
6816
6817         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
6818         blocksize = btrfs_level_size(root, level - 1);
6819
6820         next = btrfs_find_tree_block(root, bytenr, blocksize);
6821         if (!next) {
6822                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
6823                 if (!next)
6824                         return -ENOMEM;
6825                 reada = 1;
6826         }
6827         btrfs_tree_lock(next);
6828         btrfs_set_lock_blocking(next);
6829
6830         ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6831                                        &wc->refs[level - 1],
6832                                        &wc->flags[level - 1]);
6833         if (ret < 0) {
6834                 btrfs_tree_unlock(next);
6835                 /* drop the reference taken by btrfs_find_tree_block above */
6836                 free_extent_buffer(next);
6837                 return ret;
6838         }
6837
6838         BUG_ON(wc->refs[level - 1] == 0);
6839         *lookup_info = 0;
6840
6841         if (wc->stage == DROP_REFERENCE) {
6842                 if (wc->refs[level - 1] > 1) {
6843                         if (level == 1 &&
6844                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6845                                 goto skip;
6846
6847                         if (!wc->update_ref ||
6848                             generation <= root->root_key.offset)
6849                                 goto skip;
6850
6851                         btrfs_node_key_to_cpu(path->nodes[level], &key,
6852                                               path->slots[level]);
6853                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
6854                         if (ret < 0)
6855                                 goto skip;
6856
6857                         wc->stage = UPDATE_BACKREF;
6858                         wc->shared_level = level - 1;
6859                 }
6860         } else {
6861                 if (level == 1 &&
6862                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6863                         goto skip;
6864         }
6865
6866         if (!btrfs_buffer_uptodate(next, generation, 0)) {
6867                 btrfs_tree_unlock(next);
6868                 free_extent_buffer(next);
6869                 next = NULL;
6870                 *lookup_info = 1;
6871         }
6872
6873         if (!next) {
6874                 if (reada && level == 1)
6875                         reada_walk_down(trans, root, wc, path);
6876                 next = read_tree_block(root, bytenr, blocksize, generation);
6877                 if (!next)
6878                         return -EIO;
6879                 btrfs_tree_lock(next);
6880                 btrfs_set_lock_blocking(next);
6881         }
6882
6883         level--;
6884         BUG_ON(level != btrfs_header_level(next));
6885         path->nodes[level] = next;
6886         path->slots[level] = 0;
6887         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6888         wc->level = level;
6889         if (wc->level == 1)
6890                 wc->reada_slot = 0;
6891         return 0;
6892 skip:
6893         wc->refs[level - 1] = 0;
6894         wc->flags[level - 1] = 0;
6895         if (wc->stage == DROP_REFERENCE) {
6896                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
6897                         parent = path->nodes[level]->start;
6898                 } else {
6899                         BUG_ON(root->root_key.objectid !=
6900                                btrfs_header_owner(path->nodes[level]));
6901                         parent = 0;
6902                 }
6903
6904                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
6905                                 root->root_key.objectid, level - 1, 0, 0);
6906                 BUG_ON(ret); /* -ENOMEM */
6907         }
6908         btrfs_tree_unlock(next);
6909         free_extent_buffer(next);
6910         *lookup_info = 1;
6911         return 1;
6912 }
6913
6914 /*
6915  * helper to process tree block while walking up the tree.
6916  *
6917  * when wc->stage == DROP_REFERENCE, this function drops
6918  * reference count on the block.
6919  *
6920  * when wc->stage == UPDATE_BACKREF, this function changes
6921  * wc->stage back to DROP_REFERENCE once the block that switched
6922  * us to UPDATE_BACKREF has been processed.
6923  *
6924  * NOTE: return value 1 means we should stop walking up.
6925  */
6926 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6927                                  struct btrfs_root *root,
6928                                  struct btrfs_path *path,
6929                                  struct walk_control *wc)
6930 {
6931         int ret;
6932         int level = wc->level;
6933         struct extent_buffer *eb = path->nodes[level];
6934         u64 parent = 0;
6935
6936         if (wc->stage == UPDATE_BACKREF) {
6937                 BUG_ON(wc->shared_level < level);
6938                 if (level < wc->shared_level)
6939                         goto out;
6940
6941                 ret = find_next_key(path, level + 1, &wc->update_progress);
6942                 if (ret > 0)
6943                         wc->update_ref = 0;
6944
6945                 wc->stage = DROP_REFERENCE;
6946                 wc->shared_level = -1;
6947                 path->slots[level] = 0;
6948
6949                 /*
6950                  * check reference count again if the block isn't locked.
6951                  * we should start walking down the tree again if reference
6952                  * count is one.
6953                  */
6954                 if (!path->locks[level]) {
6955                         BUG_ON(level == 0);
6956                         btrfs_tree_lock(eb);
6957                         btrfs_set_lock_blocking(eb);
6958                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6959
6960                         ret = btrfs_lookup_extent_info(trans, root,
6961                                                        eb->start, eb->len,
6962                                                        &wc->refs[level],
6963                                                        &wc->flags[level]);
6964                         if (ret < 0) {
6965                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6966                                 path->locks[level] = 0;
6967                                 return ret;
6968                         }
6969                         BUG_ON(wc->refs[level] == 0);
6970                         if (wc->refs[level] == 1) {
6971                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6972                                 path->locks[level] = 0;
6973                                 return 1;
6974                         }
6975                 }
6976         }
6977
6978         /* wc->stage == DROP_REFERENCE */
6979         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
6980
6981         if (wc->refs[level] == 1) {
6982                 if (level == 0) {
6983                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6984                                 ret = btrfs_dec_ref(trans, root, eb, 1,
6985                                                     wc->for_reloc);
6986                         else
6987                                 ret = btrfs_dec_ref(trans, root, eb, 0,
6988                                                     wc->for_reloc);
6989                         BUG_ON(ret); /* -ENOMEM */
6990                 }
6991                 /* make block locked assertion in clean_tree_block happy */
6992                 if (!path->locks[level] &&
6993                     btrfs_header_generation(eb) == trans->transid) {
6994                         btrfs_tree_lock(eb);
6995                         btrfs_set_lock_blocking(eb);
6996                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6997                 }
6998                 clean_tree_block(trans, root, eb);
6999         }
7000
7001         if (eb == root->node) {
7002                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7003                         parent = eb->start;
7004                 else
7005                         BUG_ON(root->root_key.objectid !=
7006                                btrfs_header_owner(eb));
7007         } else {
7008                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7009                         parent = path->nodes[level + 1]->start;
7010                 else
7011                         BUG_ON(root->root_key.objectid !=
7012                                btrfs_header_owner(path->nodes[level + 1]));
7013         }
7014
7015         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
7016 out:
7017         wc->refs[level] = 0;
7018         wc->flags[level] = 0;
7019         return 0;
7020 }
7021
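/*
 * walk down from wc->level towards the leaves, handling each block
 * with walk_down_proc() and following pointers with do_walk_down().
 * returns 0 once it can't descend any further (leaf reached, shared
 * block found or current node exhausted), or a negative error.
 */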
7022 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
7023                                    struct btrfs_root *root,
7024                                    struct btrfs_path *path,
7025                                    struct walk_control *wc)
7026 {
7027         int level = wc->level;
7028         int lookup_info = 1;
7029         int ret;
7030
7031         while (level >= 0) {
7032                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
7033                 if (ret > 0)
7034                         break;
7035
7036                 if (level == 0)
7037                         break;
7038
7039                 if (path->slots[level] >=
7040                     btrfs_header_nritems(path->nodes[level]))
7041                         break;
7042
7043                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
7044                 if (ret > 0) {
7045                         path->slots[level]++;
7046                         continue;
7047                 } else if (ret < 0)
7048                         return ret;
7049                 level = wc->level;
7050         }
7051         return 0;
7052 }
7053
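/*
 * walk back up the tree, dropping our references via walk_up_proc()
 * as we go.  returns 0 when some level still has a sibling slot to
 * walk down into, 1 once everything below max_level is finished.
 */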
7054 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
7055                                  struct btrfs_root *root,
7056                                  struct btrfs_path *path,
7057                                  struct walk_control *wc, int max_level)
7058 {
7059         int level = wc->level;
7060         int ret;
7061
7062         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
7063         while (level < max_level && path->nodes[level]) {
7064                 wc->level = level;
7065                 if (path->slots[level] + 1 <
7066                     btrfs_header_nritems(path->nodes[level])) {
7067                         path->slots[level]++;
7068                         return 0;
7069                 } else {
7070                         ret = walk_up_proc(trans, root, path, wc);
7071                         if (ret > 0)
7072                                 return 0;
7073
7074                         if (path->locks[level]) {
7075                                 btrfs_tree_unlock_rw(path->nodes[level],
7076                                                      path->locks[level]);
7077                                 path->locks[level] = 0;
7078                         }
7079                         free_extent_buffer(path->nodes[level]);
7080                         path->nodes[level] = NULL;
7081                         level++;
7082                 }
7083         }
7084         return 1;
7085 }
7086
7087 /*
7088  * drop a subvolume tree.
7089  *
7090  * this function traverses the tree freeing any blocks that are only
7091  * referenced by the tree.
7092  *
7093  * when a shared tree block is found, this function decreases its
7094  * reference count by one. if update_ref is true, this function
7095  * also makes sure backrefs for the shared block and all lower level
7096  * blocks are properly updated.
7097  */
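/*
 * a hedged usage sketch (the callers live outside this file): dropping
 * a dead root with no dedicated block reservation, no backref updating
 * and not on behalf of relocation would look like
 *
 *	btrfs_drop_snapshot(root, NULL, 0, 0);
 */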
7098 int btrfs_drop_snapshot(struct btrfs_root *root,
7099                          struct btrfs_block_rsv *block_rsv, int update_ref,
7100                          int for_reloc)
7101 {
7102         struct btrfs_path *path;
7103         struct btrfs_trans_handle *trans;
7104         struct btrfs_root *tree_root = root->fs_info->tree_root;
7105         struct btrfs_root_item *root_item = &root->root_item;
7106         struct walk_control *wc;
7107         struct btrfs_key key;
7108         int err = 0;
7109         int ret;
7110         int level;
7111
7112         path = btrfs_alloc_path();
7113         if (!path) {
7114                 err = -ENOMEM;
7115                 goto out;
7116         }
7117
7118         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7119         if (!wc) {
7120                 btrfs_free_path(path);
7121                 err = -ENOMEM;
7122                 goto out;
7123         }
7124
7125         trans = btrfs_start_transaction(tree_root, 0);
7126         if (IS_ERR(trans)) {
7127                 err = PTR_ERR(trans);
7128                 goto out_free;
7129         }
7130
7131         if (block_rsv)
7132                 trans->block_rsv = block_rsv;
7133
7134         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
7135                 level = btrfs_header_level(root->node);
7136                 path->nodes[level] = btrfs_lock_root_node(root);
7137                 btrfs_set_lock_blocking(path->nodes[level]);
7138                 path->slots[level] = 0;
7139                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7140                 memset(&wc->update_progress, 0,
7141                        sizeof(wc->update_progress));
7142         } else {
7143                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
7144                 memcpy(&wc->update_progress, &key,
7145                        sizeof(wc->update_progress));
7146
7147                 level = root_item->drop_level;
7148                 BUG_ON(level == 0);
7149                 path->lowest_level = level;
7150                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7151                 path->lowest_level = 0;
7152                 if (ret < 0) {
7153                         err = ret;
7154                         goto out_end_trans;
7155                 }
7156                 WARN_ON(ret > 0);
7157
7158                 /*
7159                  * unlock our path; this is safe because only this
7160                  * function is allowed to delete this snapshot
7161                  */
7162                 btrfs_unlock_up_safe(path, 0);
7163
7164                 level = btrfs_header_level(root->node);
7165                 while (1) {
7166                         btrfs_tree_lock(path->nodes[level]);
7167                         btrfs_set_lock_blocking(path->nodes[level]);
7168
7169                         ret = btrfs_lookup_extent_info(trans, root,
7170                                                 path->nodes[level]->start,
7171                                                 path->nodes[level]->len,
7172                                                 &wc->refs[level],
7173                                                 &wc->flags[level]);
7174                         if (ret < 0) {
7175                                 err = ret;
7176                                 goto out_end_trans;
7177                         }
7178                         BUG_ON(wc->refs[level] == 0);
7179
7180                         if (level == root_item->drop_level)
7181                                 break;
7182
7183                         btrfs_tree_unlock(path->nodes[level]);
7184                         WARN_ON(wc->refs[level] != 1);
7185                         level--;
7186                 }
7187         }
7188
7189         wc->level = level;
7190         wc->shared_level = -1;
7191         wc->stage = DROP_REFERENCE;
7192         wc->update_ref = update_ref;
7193         wc->keep_locks = 0;
7194         wc->for_reloc = for_reloc;
7195         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7196
7197         while (1) {
7198                 ret = walk_down_tree(trans, root, path, wc);
7199                 if (ret < 0) {
7200                         err = ret;
7201                         break;
7202                 }
7203
7204                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
7205                 if (ret < 0) {
7206                         err = ret;
7207                         break;
7208                 }
7209
7210                 if (ret > 0) {
7211                         BUG_ON(wc->stage != DROP_REFERENCE);
7212                         break;
7213                 }
7214
7215                 if (wc->stage == DROP_REFERENCE) {
7216                         level = wc->level;
7217                         btrfs_node_key(path->nodes[level],
7218                                        &root_item->drop_progress,
7219                                        path->slots[level]);
7220                         root_item->drop_level = level;
7221                 }
7222
7223                 BUG_ON(wc->level == 0);
7224                 if (btrfs_should_end_transaction(trans, tree_root)) {
7225                         ret = btrfs_update_root(trans, tree_root,
7226                                                 &root->root_key,
7227                                                 root_item);
7228                         if (ret) {
7229                                 btrfs_abort_transaction(trans, tree_root, ret);
7230                                 err = ret;
7231                                 goto out_end_trans;
7232                         }
7233
7234                         btrfs_end_transaction_throttle(trans, tree_root);
7235                         trans = btrfs_start_transaction(tree_root, 0);
7236                         if (IS_ERR(trans)) {
7237                                 err = PTR_ERR(trans);
7238                                 goto out_free;
7239                         }
7240                         if (block_rsv)
7241                                 trans->block_rsv = block_rsv;
7242                 }
7243         }
7244         btrfs_release_path(path);
7245         if (err)
7246                 goto out_end_trans;
7247
7248         ret = btrfs_del_root(trans, tree_root, &root->root_key);
7249         if (ret) {
7250                 btrfs_abort_transaction(trans, tree_root, ret);
7251                 err = ret;
7252                 goto out_end_trans;
7253         }
7253
7254         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7255                 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
7256                                            NULL, NULL);
7257                 if (ret < 0) {
7258                         btrfs_abort_transaction(trans, tree_root, ret);
7259                         err = ret;
7260                         goto out_end_trans;
7261                 } else if (ret > 0) {
7262                         /* if we fail to delete the orphan item this time
7263                          * around, it'll get picked up the next time.
7264                          *
7265                          * The most common failure here is just -ENOENT.
7266                          */
7267                         btrfs_del_orphan_item(trans, tree_root,
7268                                               root->root_key.objectid);
7269                 }
7270         }
7271
7272         if (root->in_radix) {
7273                 btrfs_free_fs_root(tree_root->fs_info, root);
7274         } else {
7275                 free_extent_buffer(root->node);
7276                 free_extent_buffer(root->commit_root);
7277                 kfree(root);
7278         }
7279 out_end_trans:
7280         btrfs_end_transaction_throttle(trans, tree_root);
7281 out_free:
7282         kfree(wc);
7283         btrfs_free_path(path);
7284 out:
7285         if (err)
7286                 btrfs_std_error(root->fs_info, err);
7287         return err;
7288 }
7289
7290 /*
7291  * drop subtree rooted at tree block 'node'.
7292  *
7293  * NOTE: this function will unlock and release tree block 'node'.
7294  * only used by the relocation code.
7295  */
7296 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
7297                         struct btrfs_root *root,
7298                         struct extent_buffer *node,
7299                         struct extent_buffer *parent)
7300 {
7301         struct btrfs_path *path;
7302         struct walk_control *wc;
7303         int level;
7304         int parent_level;
7305         int ret = 0;
7306         int wret;
7307
7308         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7309
7310         path = btrfs_alloc_path();
7311         if (!path)
7312                 return -ENOMEM;
7313
7314         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7315         if (!wc) {
7316                 btrfs_free_path(path);
7317                 return -ENOMEM;
7318         }
7319
7320         btrfs_assert_tree_locked(parent);
7321         parent_level = btrfs_header_level(parent);
7322         extent_buffer_get(parent);
7323         path->nodes[parent_level] = parent;
7324         path->slots[parent_level] = btrfs_header_nritems(parent);
7325
7326         btrfs_assert_tree_locked(node);
7327         level = btrfs_header_level(node);
7328         path->nodes[level] = node;
7329         path->slots[level] = 0;
7330         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7331
7332         wc->refs[parent_level] = 1;
7333         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7334         wc->level = level;
7335         wc->shared_level = -1;
7336         wc->stage = DROP_REFERENCE;
7337         wc->update_ref = 0;
7338         wc->keep_locks = 1;
7339         wc->for_reloc = 1;
7340         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7341
7342         while (1) {
7343                 wret = walk_down_tree(trans, root, path, wc);
7344                 if (wret < 0) {
7345                         ret = wret;
7346                         break;
7347                 }
7348
7349                 wret = walk_up_tree(trans, root, path, wc, parent_level);
7350                 if (wret < 0)
7351                         ret = wret;
7352                 if (wret != 0)
7353                         break;
7354         }
7355
7356         kfree(wc);
7357         btrfs_free_path(path);
7358         return ret;
7359 }
7360
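/*
 * compute the profile a block group should be converted to before it
 * is made read-only: honor a pending restripe target if there is one,
 * otherwise degrade striped/mirrored profiles to something that still
 * works with the number of writable (plus missing) devices.
 */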
7361 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7362 {
7363         u64 num_devices;
7364         u64 stripped;
7365
7366         /*
7367          * if restripe for this chunk_type is on, pick the target profile
7368          * and return; otherwise do the usual balance
7369          */
7370         stripped = get_restripe_target(root->fs_info, flags);
7371         if (stripped)
7372                 return extended_to_chunk(stripped);
7373
7374         /*
7375          * we add in the count of missing devices because we want
7376          * to make sure that any RAID levels on a degraded FS
7377          * continue to be honored.
7378          */
7379         num_devices = root->fs_info->fs_devices->rw_devices +
7380                 root->fs_info->fs_devices->missing_devices;
7381
7382         stripped = BTRFS_BLOCK_GROUP_RAID0 |
7383                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
7384                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7385
7386         if (num_devices == 1) {
7387                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7388                 stripped = flags & ~stripped;
7389
7390                 /* turn raid0 into single device chunks */
7391                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7392                         return stripped;
7393
7394                 /* turn mirroring into duplication */
7395                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7396                              BTRFS_BLOCK_GROUP_RAID10))
7397                         return stripped | BTRFS_BLOCK_GROUP_DUP;
7398         } else {
7399                 /* they already had raid on here, just return */
7400                 if (flags & stripped)
7401                         return flags;
7402
7403                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7404                 stripped = flags & ~stripped;
7405
7406                 /* switch duplicated blocks with raid1 */
7407                 if (flags & BTRFS_BLOCK_GROUP_DUP)
7408                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
7409
7410                 /* this is drive concat, leave it alone */
7411         }
7412
7413         return flags;
7414 }
7415
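/*
 * try to mark a block group read-only, moving its unused bytes into
 * sinfo->bytes_readonly.  fails with -ENOSPC if that would leave the
 * space_info without the minimum headroom computed below.
 */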
7416 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
7417 {
7418         struct btrfs_space_info *sinfo = cache->space_info;
7419         u64 num_bytes;
7420         u64 min_allocable_bytes;
7421         int ret = -ENOSPC;
7422
7423         /*
7424          * We need some metadata space and system metadata space for
7425          * allocating chunks in some corner cases, so keep a minimum of
7426          * allocatable bytes free unless we are forced to set the block
7427          * group read-only.
7428          */
7429         if ((sinfo->flags &
7430              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7431             !force)
7432                 min_allocable_bytes = 1 * 1024 * 1024;
7433         else
7434                 min_allocable_bytes = 0;
7435
7436         spin_lock(&sinfo->lock);
7437         spin_lock(&cache->lock);
7438
7439         if (cache->ro) {
7440                 ret = 0;
7441                 goto out;
7442         }
7443
7444         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7445                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7446
7447         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7448             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7449             min_allocable_bytes <= sinfo->total_bytes) {
7450                 sinfo->bytes_readonly += num_bytes;
7451                 cache->ro = 1;
7452                 ret = 0;
7453         }
7454 out:
7455         spin_unlock(&cache->lock);
7456         spin_unlock(&sinfo->lock);
7457         return ret;
7458 }
7459
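/*
 * public entry point for making a block group read-only: convert the
 * group's profile if a restripe asks for it, then try to set it ro,
 * force-allocating one extra chunk and retrying if the first attempt
 * runs out of space.
 */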
7460 int btrfs_set_block_group_ro(struct btrfs_root *root,
7461                              struct btrfs_block_group_cache *cache)
7462
7463 {
7464         struct btrfs_trans_handle *trans;
7465         u64 alloc_flags;
7466         int ret;
7467
7468         BUG_ON(cache->ro);
7469
7470         trans = btrfs_join_transaction(root);
7471         if (IS_ERR(trans))
7472                 return PTR_ERR(trans);
7473
7474         alloc_flags = update_block_group_flags(root, cache->flags);
7475         if (alloc_flags != cache->flags) {
7476                 ret = do_chunk_alloc(trans, root, alloc_flags,
7477                                      CHUNK_ALLOC_FORCE);
7478                 if (ret < 0)
7479                         goto out;
7480         }
7481
7482         ret = set_block_group_ro(cache, 0);
7483         if (!ret)
7484                 goto out;
7485         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7486         ret = do_chunk_alloc(trans, root, alloc_flags,
7487                              CHUNK_ALLOC_FORCE);
7488         if (ret < 0)
7489                 goto out;
7490         ret = set_block_group_ro(cache, 0);
7491 out:
7492         btrfs_end_transaction(trans, root);
7493         return ret;
7494 }
7495
7496 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7497                             struct btrfs_root *root, u64 type)
7498 {
7499         u64 alloc_flags = get_alloc_profile(root, type);
7500         return do_chunk_alloc(trans, root, alloc_flags,
7501                               CHUNK_ALLOC_FORCE);
7502 }
7503
7504 /*
7505  * helper to account the unused space of all the readonly block groups in
7506  * the list. takes mirrors into account.
7507  */
7508 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7509 {
7510         struct btrfs_block_group_cache *block_group;
7511         u64 free_bytes = 0;
7512         int factor;
7513
7514         list_for_each_entry(block_group, groups_list, list) {
7515                 spin_lock(&block_group->lock);
7516
7517                 if (!block_group->ro) {
7518                         spin_unlock(&block_group->lock);
7519                         continue;
7520                 }
7521
7522                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7523                                           BTRFS_BLOCK_GROUP_RAID10 |
7524                                           BTRFS_BLOCK_GROUP_DUP))
7525                         factor = 2;
7526                 else
7527                         factor = 1;
7528
7529                 free_bytes += (block_group->key.offset -
7530                                btrfs_block_group_used(&block_group->item)) *
7531                                factor;
7532
7533                 spin_unlock(&block_group->lock);
7534         }
7535
7536         return free_bytes;
7537 }
7538
7539 /*
7540  * helper to account the unused space of all the readonly block groups in the
7541  * space_info. takes mirrors into account.
7542  */
7543 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7544 {
7545         int i;
7546         u64 free_bytes = 0;
7547
7548         spin_lock(&sinfo->lock);
7549
7550         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
7551                 if (!list_empty(&sinfo->block_groups[i]))
7552                         free_bytes += __btrfs_get_ro_block_group_free_space(
7553                                                 &sinfo->block_groups[i]);
7554
7555         spin_unlock(&sinfo->lock);
7556
7557         return free_bytes;
7558 }
7559
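/*
 * undo set_block_group_ro(): give the unused bytes back to the
 * space_info and clear the ro flag.
 */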
7560 void btrfs_set_block_group_rw(struct btrfs_root *root,
7561                               struct btrfs_block_group_cache *cache)
7562 {
7563         struct btrfs_space_info *sinfo = cache->space_info;
7564         u64 num_bytes;
7565
7566         BUG_ON(!cache->ro);
7567
7568         spin_lock(&sinfo->lock);
7569         spin_lock(&cache->lock);
7570         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7571                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7572         sinfo->bytes_readonly -= num_bytes;
7573         cache->ro = 0;
7574         spin_unlock(&cache->lock);
7575         spin_unlock(&sinfo->lock);
7576 }
7577
7578 /*
7579  * checks to see if it's even possible to relocate this block group.
7580  *
7581  * @return - -1 if it's not a good idea to relocate this block group, 0 if
7582  * it's ok to go ahead and try.
7583  */
7584 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7585 {
7586         struct btrfs_block_group_cache *block_group;
7587         struct btrfs_space_info *space_info;
7588         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7589         struct btrfs_device *device;
7590         u64 min_free;
7591         u64 dev_min = 1;
7592         u64 dev_nr = 0;
7593         u64 target;
7594         int index;
7595         int full = 0;
7596         int ret = 0;
7597
7598         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
7599
7600         /* odd, couldn't find the block group, leave it alone */
7601         if (!block_group)
7602                 return -1;
7603
7604         min_free = btrfs_block_group_used(&block_group->item);
7605
7606         /* no bytes used, we're good */
7607         if (!min_free)
7608                 goto out;
7609
7610         space_info = block_group->space_info;
7611         spin_lock(&space_info->lock);
7612
7613         full = space_info->full;
7614
7615         /*
7616          * if this is the last block group we have in this space, we can't
7617          * relocate it unless we're able to allocate a new chunk below.
7618          *
7619          * Otherwise, we need to make sure we have room in the space to handle
7620          * all of the extents from this block group.  If we can, we're good
7621          * all of the extents from this block group.  If we can, we're good.
7622         if ((space_info->total_bytes != block_group->key.offset) &&
7623             (space_info->bytes_used + space_info->bytes_reserved +
7624              space_info->bytes_pinned + space_info->bytes_readonly +
7625              min_free < space_info->total_bytes)) {
7626                 spin_unlock(&space_info->lock);
7627                 goto out;
7628         }
7629         spin_unlock(&space_info->lock);
7630
7631         /*
7632          * ok we don't have enough space, but maybe we have free space on our
7633          * devices to allocate new chunks for relocation, so loop through our
7634          * alloc devices and guess if we have enough space.  if this block
7635          * group is going to be restriped, run checks against the target
7636          * profile instead of the current one.
7637          */
7638         ret = -1;
7639
7640         /*
7641          * index:
7642          *      0: raid10
7643          *      1: raid1
7644          *      2: dup
7645          *      3: raid0
7646          *      4: single
7647          *      5: raid5
7648          *      6: raid6
7649          */
7648         target = get_restripe_target(root->fs_info, block_group->flags);
7649         if (target) {
7650                 index = __get_raid_index(extended_to_chunk(target));
7651         } else {
7652                 /*
7653                  * this is just a balance, so if we were marked as full
7654                  * we know there is no space for a new chunk
7655                  */
7656                 if (full)
7657                         goto out;
7658
7659                 index = get_block_group_index(block_group);
7660         }
7661
7662         if (index == BTRFS_RAID_RAID10) {
7663                 dev_min = 4;
7664                 /* Divide by 2 */
7665                 min_free >>= 1;
7666         } else if (index == BTRFS_RAID_RAID1) {
7667                 dev_min = 2;
7668         } else if (index == BTRFS_RAID_DUP) {
7669                 /* Multiply by 2 */
7670                 min_free <<= 1;
7671         } else if (index == BTRFS_RAID_RAID0) {
7672                 dev_min = fs_devices->rw_devices;
7673                 do_div(min_free, dev_min);
7674         }
7675
7676         mutex_lock(&root->fs_info->chunk_mutex);
7677         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
7678                 u64 dev_offset;
7679
7680                 /*
7681                  * check to make sure we can actually find a chunk with enough
7682                  * space to fit our block group in.
7683                  */
7684                 if (device->total_bytes > device->bytes_used + min_free &&
7685                     !device->is_tgtdev_for_dev_replace) {
7686                         ret = find_free_dev_extent(device, min_free,
7687                                                    &dev_offset, NULL);
7688                         if (!ret)
7689                                 dev_nr++;
7690
7691                         if (dev_nr >= dev_min)
7692                                 break;
7693
7694                         ret = -1;
7695                 }
7696         }
7697         mutex_unlock(&root->fs_info->chunk_mutex);
7698 out:
7699         btrfs_put_block_group(block_group);
7700         return ret;
7701 }
7702
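/*
 * find the first BTRFS_BLOCK_GROUP_ITEM_KEY at or after key->objectid.
 * returns 0 with the path positioned on the item, a positive value if
 * no further block group items exist, or a negative error.
 */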
7703 static int find_first_block_group(struct btrfs_root *root,
7704                 struct btrfs_path *path, struct btrfs_key *key)
7705 {
7706         int ret = 0;
7707         struct btrfs_key found_key;
7708         struct extent_buffer *leaf;
7709         int slot;
7710
7711         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7712         if (ret < 0)
7713                 goto out;
7714
7715         while (1) {
7716                 slot = path->slots[0];
7717                 leaf = path->nodes[0];
7718                 if (slot >= btrfs_header_nritems(leaf)) {
7719                         ret = btrfs_next_leaf(root, path);
7720                         if (ret == 0)
7721                                 continue;
7722                         if (ret < 0)
7723                                 goto out;
7724                         break;
7725                 }
7726                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
7727
7728                 if (found_key.objectid >= key->objectid &&
7729                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
7730                         ret = 0;
7731                         goto out;
7732                 }
7733                 path->slots[0]++;
7734         }
7735 out:
7736         return ret;
7737 }
7738
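/*
 * drop the cached free space inode (if any) pinned by each block
 * group; run at unmount time, before the block groups themselves are
 * torn down.
 */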
7739 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
7740 {
7741         struct btrfs_block_group_cache *block_group;
7742         u64 last = 0;
7743
7744         while (1) {
7745                 struct inode *inode;
7746
7747                 block_group = btrfs_lookup_first_block_group(info, last);
7748                 while (block_group) {
7749                         spin_lock(&block_group->lock);
7750                         if (block_group->iref)
7751                                 break;
7752                         spin_unlock(&block_group->lock);
7753                         block_group = next_block_group(info->tree_root,
7754                                                        block_group);
7755                 }
7756                 if (!block_group) {
7757                         if (last == 0)
7758                                 break;
7759                         last = 0;
7760                         continue;
7761                 }
7762
7763                 inode = block_group->inode;
7764                 block_group->iref = 0;
7765                 block_group->inode = NULL;
7766                 spin_unlock(&block_group->lock);
7767                 iput(inode);
7768                 last = block_group->key.objectid + block_group->key.offset;
7769                 btrfs_put_block_group(block_group);
7770         }
7771 }
7772
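/*
 * unmount-time teardown: release the caching controls, remove and put
 * every block group cache and finally free the space_info structures
 * once nothing can be using them anymore.
 */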
7773 int btrfs_free_block_groups(struct btrfs_fs_info *info)
7774 {
7775         struct btrfs_block_group_cache *block_group;
7776         struct btrfs_space_info *space_info;
7777         struct btrfs_caching_control *caching_ctl;
7778         struct rb_node *n;
7779
7780         down_write(&info->extent_commit_sem);
7781         while (!list_empty(&info->caching_block_groups)) {
7782                 caching_ctl = list_entry(info->caching_block_groups.next,
7783                                          struct btrfs_caching_control, list);
7784                 list_del(&caching_ctl->list);
7785                 put_caching_control(caching_ctl);
7786         }
7787         up_write(&info->extent_commit_sem);
7788
7789         spin_lock(&info->block_group_cache_lock);
7790         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
7791                 block_group = rb_entry(n, struct btrfs_block_group_cache,
7792                                        cache_node);
7793                 rb_erase(&block_group->cache_node,
7794                          &info->block_group_cache_tree);
7795                 spin_unlock(&info->block_group_cache_lock);
7796
7797                 down_write(&block_group->space_info->groups_sem);
7798                 list_del(&block_group->list);
7799                 up_write(&block_group->space_info->groups_sem);
7800
7801                 if (block_group->cached == BTRFS_CACHE_STARTED)
7802                         wait_block_group_cache_done(block_group);
7803
7804                 /*
7805                  * We haven't cached this block group, which means we could
7806                  * possibly have excluded extents on this block group.
7807                  */
7808                 if (block_group->cached == BTRFS_CACHE_NO)
7809                         free_excluded_extents(info->extent_root, block_group);
7810
7811                 btrfs_remove_free_space_cache(block_group);
7812                 btrfs_put_block_group(block_group);
7813
7814                 spin_lock(&info->block_group_cache_lock);
7815         }
7816         spin_unlock(&info->block_group_cache_lock);
7817
7818         /* now that all the block groups are freed, go through and
7819          * free all the space_info structs.  This is only called during
7820          * the final stages of unmount, and so we know nobody is
7821          * using them.  We call synchronize_rcu() once before we start,
7822          * just to be on the safe side.
7823          */
7824         synchronize_rcu();
7825
7826         release_global_block_rsv(info);
7827
7828         while (!list_empty(&info->space_info)) {
7829                 space_info = list_entry(info->space_info.next,
7830                                         struct btrfs_space_info,
7831                                         list);
7832                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
7833                         if (space_info->bytes_pinned > 0 ||
7834                             space_info->bytes_reserved > 0 ||
7835                             space_info->bytes_may_use > 0) {
7836                                 WARN_ON(1);
7837                                 dump_space_info(space_info, 0, 0);
7838                         }
7839                 }
7840                 list_del(&space_info->list);
7841                 kfree(space_info);
7842         }
7843         return 0;
7844 }
7845
7846 static void __link_block_group(struct btrfs_space_info *space_info,
7847                                struct btrfs_block_group_cache *cache)
7848 {
7849         int index = get_block_group_index(cache);
7850
7851         down_write(&space_info->groups_sem);
7852         list_add_tail(&cache->list, &space_info->block_groups[index]);
7853         up_write(&space_info->groups_sem);
7854 }
7855
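/*
 * mount-time setup: read every block group item from the extent tree,
 * build the in-memory caches, hook them into their space_info and mark
 * read-only whatever shouldn't be allocated from (readonly chunks, and
 * unmirrored groups on filesystems that have mirrored ones).
 */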
7856 int btrfs_read_block_groups(struct btrfs_root *root)
7857 {
7858         struct btrfs_path *path;
7859         int ret;
7860         struct btrfs_block_group_cache *cache;
7861         struct btrfs_fs_info *info = root->fs_info;
7862         struct btrfs_space_info *space_info;
7863         struct btrfs_key key;
7864         struct btrfs_key found_key;
7865         struct extent_buffer *leaf;
7866         int need_clear = 0;
7867         u64 cache_gen;
7868
7869         root = info->extent_root;
7870         key.objectid = 0;
7871         key.offset = 0;
7872         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
7873         path = btrfs_alloc_path();
7874         if (!path)
7875                 return -ENOMEM;
7876         path->reada = 1;
7877
7878         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
7879         if (btrfs_test_opt(root, SPACE_CACHE) &&
7880             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
7881                 need_clear = 1;
7882         if (btrfs_test_opt(root, CLEAR_CACHE))
7883                 need_clear = 1;
7884
7885         while (1) {
7886                 ret = find_first_block_group(root, path, &key);
7887                 if (ret > 0)
7888                         break;
7889                 if (ret != 0)
7890                         goto error;
7891                 leaf = path->nodes[0];
7892                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7893                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
7894                 if (!cache) {
7895                         ret = -ENOMEM;
7896                         goto error;
7897                 }
7898                 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7899                                                 GFP_NOFS);
7900                 if (!cache->free_space_ctl) {
7901                         kfree(cache);
7902                         ret = -ENOMEM;
7903                         goto error;
7904                 }
7905
7906                 atomic_set(&cache->count, 1);
7907                 spin_lock_init(&cache->lock);
7908                 cache->fs_info = info;
7909                 INIT_LIST_HEAD(&cache->list);
7910                 INIT_LIST_HEAD(&cache->cluster_list);
7911
7912                 if (need_clear) {
7913                         /*
7914                          * When we mount with an old space cache, we need to
7915                          * set BTRFS_DC_CLEAR and set the dirty flag.
7916                          *
7917                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
7918                          *    truncate the old free space cache inode and
7919                          *    set up a new one.
7920                          * b) Setting the 'dirty flag' makes sure that we flush
7921                          *    the new space cache info onto disk.
7922                          */
7923                         cache->disk_cache_state = BTRFS_DC_CLEAR;
7924                         if (btrfs_test_opt(root, SPACE_CACHE))
7925                                 cache->dirty = 1;
7926                 }
7927
7928                 read_extent_buffer(leaf, &cache->item,
7929                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
7930                                    sizeof(cache->item));
7931                 memcpy(&cache->key, &found_key, sizeof(found_key));
7932
7933                 key.objectid = found_key.objectid + found_key.offset;
7934                 btrfs_release_path(path);
7935                 cache->flags = btrfs_block_group_flags(&cache->item);
7936                 cache->sectorsize = root->sectorsize;
7937                 cache->full_stripe_len = btrfs_full_stripe_len(root,
7938                                                &root->fs_info->mapping_tree,
7939                                                found_key.objectid);
7940                 btrfs_init_free_space_ctl(cache);
7941
7942                 /*
7943                  * We need to exclude the super stripes now so that the space
7944                  * info has super bytes accounted for, otherwise we'll think
7945                  * we have more space than we actually do.
7946                  */
7947                 exclude_super_stripes(root, cache);
7948
7949                 /*
7950                  * check for two cases: either we are full, and therefore
7951                  * don't need to bother with the caching work since we won't
7952                  * find any space, or we are empty, and we can just add all
7953                  * the space in and be done with it.  This saves us _a lot_ of
7954                  * time, particularly in the full case.
7955                  */
7956                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
7957                         cache->last_byte_to_unpin = (u64)-1;
7958                         cache->cached = BTRFS_CACHE_FINISHED;
7959                         free_excluded_extents(root, cache);
7960                 } else if (btrfs_block_group_used(&cache->item) == 0) {
7961                         cache->last_byte_to_unpin = (u64)-1;
7962                         cache->cached = BTRFS_CACHE_FINISHED;
7963                         add_new_free_space(cache, root->fs_info,
7964                                            found_key.objectid,
7965                                            found_key.objectid +
7966                                            found_key.offset);
7967                         free_excluded_extents(root, cache);
7968                 }
7969
7970                 ret = update_space_info(info, cache->flags, found_key.offset,
7971                                         btrfs_block_group_used(&cache->item),
7972                                         &space_info);
7973                 BUG_ON(ret); /* -ENOMEM */
7974                 cache->space_info = space_info;
7975                 spin_lock(&cache->space_info->lock);
7976                 cache->space_info->bytes_readonly += cache->bytes_super;
7977                 spin_unlock(&cache->space_info->lock);
7978
7979                 __link_block_group(space_info, cache);
7980
7981                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7982                 BUG_ON(ret); /* Logic error */
7983
7984                 set_avail_alloc_bits(root->fs_info, cache->flags);
7985                 if (btrfs_chunk_readonly(root, cache->key.objectid))
7986                         set_block_group_ro(cache, 1);
7987         }
7988
7989         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
7990                 if (!(get_alloc_profile(root, space_info->flags) &
7991                       (BTRFS_BLOCK_GROUP_RAID10 |
7992                        BTRFS_BLOCK_GROUP_RAID1 |
7993                        BTRFS_BLOCK_GROUP_RAID5 |
7994                        BTRFS_BLOCK_GROUP_RAID6 |
7995                        BTRFS_BLOCK_GROUP_DUP)))
7996                         continue;
7997                 /*
7998                  * avoid allocating from un-mirrored block groups if there are
7999                  * mirrored block groups.
8000                  */
8001                 list_for_each_entry(cache,
8002                                 &space_info->block_groups[BTRFS_RAID_RAID0],
8003                                 list)
8004                         set_block_group_ro(cache, 1);
8005                 list_for_each_entry(cache,
8006                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
8007                                 list)
8008                         set_block_group_ro(cache, 1);
8005         }
8006
8007         init_global_block_rsv(info);
8008         ret = 0;
8009 error:
8010         btrfs_free_path(path);
8011         return ret;
8012 }
8013
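/*
 * insert the block group items for all block groups created during
 * the current transaction; trans->new_bgs is filled by
 * btrfs_make_block_group() below and drained here before the
 * transaction commits.
 */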
8014 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
8015                                        struct btrfs_root *root)
8016 {
8017         struct btrfs_block_group_cache *block_group, *tmp;
8018         struct btrfs_root *extent_root = root->fs_info->extent_root;
8019         struct btrfs_block_group_item item;
8020         struct btrfs_key key;
8021         int ret = 0;
8022
8023         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
8024                                  new_bg_list) {
8025                 list_del_init(&block_group->new_bg_list);
8026
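                     /*
                      * If a previous insertion failed, the transaction was
                      * already aborted; keep emptying the list but skip any
                      * further inserts.
                      */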
8027                 if (ret)
8028                         continue;
8029
8030                 spin_lock(&block_group->lock);
8031                 memcpy(&item, &block_group->item, sizeof(item));
8032                 memcpy(&key, &block_group->key, sizeof(key));
8033                 spin_unlock(&block_group->lock);
8034
8035                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
8036                                         sizeof(item));
8037                 if (ret)
8038                         btrfs_abort_transaction(trans, extent_root, ret);
8039         }
8040 }
8041
8042 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8043                            struct btrfs_root *root, u64 bytes_used,
8044                            u64 type, u64 chunk_objectid, u64 chunk_offset,
8045                            u64 size)
8046 {
8047         int ret;
8048         struct btrfs_root *extent_root;
8049         struct btrfs_block_group_cache *cache;
8050
8051         extent_root = root->fs_info->extent_root;
8052
8053         root->fs_info->last_trans_log_full_commit = trans->transid;
8054
8055         cache = kzalloc(sizeof(*cache), GFP_NOFS);
8056         if (!cache)
8057                 return -ENOMEM;
8058         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8059                                         GFP_NOFS);
8060         if (!cache->free_space_ctl) {
8061                 kfree(cache);
8062                 return -ENOMEM;
8063         }
8064
8065         cache->key.objectid = chunk_offset;
8066         cache->key.offset = size;
8067         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8068         cache->sectorsize = root->sectorsize;
8069         cache->fs_info = root->fs_info;
8070         cache->full_stripe_len = btrfs_full_stripe_len(root,
8071                                                &root->fs_info->mapping_tree,
8072                                                chunk_offset);
8073
8074         atomic_set(&cache->count, 1);
8075         spin_lock_init(&cache->lock);
8076         INIT_LIST_HEAD(&cache->list);
8077         INIT_LIST_HEAD(&cache->cluster_list);
8078         INIT_LIST_HEAD(&cache->new_bg_list);
8079
8080         btrfs_init_free_space_ctl(cache);
8081
8082         btrfs_set_block_group_used(&cache->item, bytes_used);
8083         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
8084         cache->flags = type;
8085         btrfs_set_block_group_flags(&cache->item, type);
8086
8087         cache->last_byte_to_unpin = (u64)-1;
8088         cache->cached = BTRFS_CACHE_FINISHED;
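             /*
              * Exclude the super block mirrors inside this group so that
              * add_new_free_space() below does not treat them as free space,
              * then drop the temporary exclusion markers again.
              */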
8089         exclude_super_stripes(root, cache);
8090
8091         add_new_free_space(cache, root->fs_info, chunk_offset,
8092                            chunk_offset + size);
8093
8094         free_excluded_extents(root, cache);
8095
8096         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
8097                                 &cache->space_info);
8098         BUG_ON(ret); /* -ENOMEM */
8099         update_global_block_rsv(root->fs_info);
8100
8101         spin_lock(&cache->space_info->lock);
8102         cache->space_info->bytes_readonly += cache->bytes_super;
8103         spin_unlock(&cache->space_info->lock);
8104
8105         __link_block_group(cache->space_info, cache);
8106
8107         ret = btrfs_add_block_group_cache(root->fs_info, cache);
8108         BUG_ON(ret); /* Logic error */
8109
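             /*
              * Queue the new group on the transaction; the block group item
              * itself is inserted into the extent tree later, by
              * btrfs_create_pending_block_groups().
              */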
8110         list_add_tail(&cache->new_bg_list, &trans->new_bgs);
8111
8112         set_avail_alloc_bits(extent_root->fs_info, type);
8113
8114         return 0;
8115 }
8116
8117 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
8118 {
8119         u64 extra_flags = chunk_to_extended(flags) &
8120                                 BTRFS_EXTENDED_PROFILE_MASK;
8121
8122         write_seqlock(&fs_info->profiles_lock);
8123         if (flags & BTRFS_BLOCK_GROUP_DATA)
8124                 fs_info->avail_data_alloc_bits &= ~extra_flags;
8125         if (flags & BTRFS_BLOCK_GROUP_METADATA)
8126                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
8127         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
8128                 fs_info->avail_system_alloc_bits &= ~extra_flags;
8129         write_sequnlock(&fs_info->profiles_lock);
8130 }
8131
8132 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
8133                              struct btrfs_root *root, u64 group_start)
8134 {
8135         struct btrfs_path *path;
8136         struct btrfs_block_group_cache *block_group;
8137         struct btrfs_free_cluster *cluster;
8138         struct btrfs_root *tree_root = root->fs_info->tree_root;
8139         struct btrfs_key key;
8140         struct inode *inode;
8141         int ret;
8142         int index;
8143         int factor;
8144
8145         root = root->fs_info->extent_root;
8146
8147         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
8148         BUG_ON(!block_group);
8149         BUG_ON(!block_group->ro);
8150
8151         /*
8152          * Free the reserved super bytes from this block group before
8153          * removing it.
8154          */
8155         free_excluded_extents(root, block_group);
8156
8157         memcpy(&key, &block_group->key, sizeof(key));
8158         index = get_block_group_index(block_group);
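             /*
              * DUP, RAID1 and RAID10 keep two copies of every byte, so their
              * on-disk usage is twice the logical size; 'factor' scales the
              * disk_total adjustment made further below.
              */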
8159         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
8160                                   BTRFS_BLOCK_GROUP_RAID1 |
8161                                   BTRFS_BLOCK_GROUP_RAID10))
8162                 factor = 2;
8163         else
8164                 factor = 1;
8165
8166         /* make sure this block group isn't part of an allocation cluster */
8167         cluster = &root->fs_info->data_alloc_cluster;
8168         spin_lock(&cluster->refill_lock);
8169         btrfs_return_cluster_to_free_space(block_group, cluster);
8170         spin_unlock(&cluster->refill_lock);
8171
8172         /*
8173          * make sure this block group isn't part of a metadata
8174          * allocation cluster
8175          */
8176         cluster = &root->fs_info->meta_alloc_cluster;
8177         spin_lock(&cluster->refill_lock);
8178         btrfs_return_cluster_to_free_space(block_group, cluster);
8179         spin_unlock(&cluster->refill_lock);
8180
8181         path = btrfs_alloc_path();
8182         if (!path) {
8183                 ret = -ENOMEM;
8184                 goto out;
8185         }
8186
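             /*
              * If this group has a free space cache inode, orphan it and drop
              * the references held on it so the cache file gets cleaned up.
              */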
8187         inode = lookup_free_space_inode(tree_root, block_group, path);
8188         if (!IS_ERR(inode)) {
8189                 ret = btrfs_orphan_add(trans, inode);
8190                 if (ret) {
8191                         btrfs_add_delayed_iput(inode);
8192                         goto out;
8193                 }
8194                 clear_nlink(inode);
8195                 /* One for the block group's ref */
8196                 spin_lock(&block_group->lock);
8197                 if (block_group->iref) {
8198                         block_group->iref = 0;
8199                         block_group->inode = NULL;
8200                         spin_unlock(&block_group->lock);
8201                         iput(inode);
8202                 } else {
8203                         spin_unlock(&block_group->lock);
8204                 }
8205                 /* One for our lookup ref */
8206                 btrfs_add_delayed_iput(inode);
8207         }
8208
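             /* Delete the on-disk free space cache item for this group, if any. */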
8209         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
8210         key.offset = block_group->key.objectid;
8211         key.type = 0;
8212
8213         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
8214         if (ret < 0)
8215                 goto out;
8216         if (ret > 0)
8217                 btrfs_release_path(path);
8218         if (ret == 0) {
8219                 ret = btrfs_del_item(trans, tree_root, path);
8220                 if (ret)
8221                         goto out;
8222                 btrfs_release_path(path);
8223         }
8224
8225         spin_lock(&root->fs_info->block_group_cache_lock);
8226         rb_erase(&block_group->cache_node,
8227                  &root->fs_info->block_group_cache_tree);
8228
8229         if (root->fs_info->first_logical_byte == block_group->key.objectid)
8230                 root->fs_info->first_logical_byte = (u64)-1;
8231         spin_unlock(&root->fs_info->block_group_cache_lock);
8232
8233         down_write(&block_group->space_info->groups_sem);
8234         /*
8235          * we must use list_del_init so others can check whether this
8236          * group is still on the list after taking the semaphore
8237          */
8238         list_del_init(&block_group->list);
8239         if (list_empty(&block_group->space_info->block_groups[index]))
8240                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
8241         up_write(&block_group->space_info->groups_sem);
8242
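             /*
              * Wait for any async caching kthread to finish with this group
              * before tearing down its in-memory free space cache.
              */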
8243         if (block_group->cached == BTRFS_CACHE_STARTED)
8244                 wait_block_group_cache_done(block_group);
8245
8246         btrfs_remove_free_space_cache(block_group);
8247
8248         spin_lock(&block_group->space_info->lock);
8249         block_group->space_info->total_bytes -= block_group->key.offset;
8250         block_group->space_info->bytes_readonly -= block_group->key.offset;
8251         block_group->space_info->disk_total -= block_group->key.offset * factor;
8252         spin_unlock(&block_group->space_info->lock);
8253
8254         memcpy(&key, &block_group->key, sizeof(key));
8255
8256         btrfs_clear_space_info_full(root->fs_info);
8257
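             /*
              * Drop two references: one taken by btrfs_lookup_block_group()
              * above, and the one held on behalf of the block group cache
              * tree that was erased earlier.
              */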
8258         btrfs_put_block_group(block_group);
8259         btrfs_put_block_group(block_group);
8260
8261         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8262         if (ret > 0)
8263                 ret = -EIO;
8264         if (ret < 0)
8265                 goto out;
8266
8267         ret = btrfs_del_item(trans, root, path);
8268 out:
8269         btrfs_free_path(path);
8270         return ret;
8271 }
8272
8273 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
8274 {
8275         struct btrfs_space_info *space_info;
8276         struct btrfs_super_block *disk_super;
8277         u64 features;
8278         u64 flags;
8279         int mixed = 0;
8280         int ret;
8281
8282         disk_super = fs_info->super_copy;
8283         if (!btrfs_super_root(disk_super))
8284                 return 1;
8285
8286         features = btrfs_super_incompat_flags(disk_super);
8287         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
8288                 mixed = 1;
8289
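             /*
              * Create the basic space_info entries up front: SYSTEM first,
              * then either one mixed DATA+METADATA entry or separate ones.
              */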
8290         flags = BTRFS_BLOCK_GROUP_SYSTEM;
8291         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8292         if (ret)
8293                 goto out;
8294
8295         if (mixed) {
8296                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
8297                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8298         } else {
8299                 flags = BTRFS_BLOCK_GROUP_METADATA;
8300                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8301                 if (ret)
8302                         goto out;
8303
8304                 flags = BTRFS_BLOCK_GROUP_DATA;
8305                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8306         }
8307 out:
8308         return ret;
8309 }
8310
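     /*
      * Thin wrappers used by the error handling code in disk-io.c when
      * cleaning up a transaction that could not commit.
      */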
8311 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
8312 {
8313         return unpin_extent_range(root, start, end);
8314 }
8315
8316 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
8317                                u64 num_bytes, u64 *actual_bytes)
8318 {
8319         return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
8320 }
8321
8322 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
8323 {
8324         struct btrfs_fs_info *fs_info = root->fs_info;
8325         struct btrfs_block_group_cache *cache = NULL;
8326         u64 group_trimmed;
8327         u64 start;
8328         u64 end;
8329         u64 trimmed = 0;
8330         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
8331         int ret = 0;
8332
8333         /*
8334          * try to trim all FS space; our first block group may start at a non-zero offset.
8335          */
8336         if (range->len == total_bytes)
8337                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
8338         else
8339                 cache = btrfs_lookup_block_group(fs_info, range->start);
8340
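             /*
              * Walk each block group overlapping the range, trimming its free
              * space and accumulating the total number of bytes trimmed.
              */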
8341         while (cache) {
8342                 if (cache->key.objectid >= (range->start + range->len)) {
8343                         btrfs_put_block_group(cache);
8344                         break;
8345                 }
8346
8347                 start = max(range->start, cache->key.objectid);
8348                 end = min(range->start + range->len,
8349                                 cache->key.objectid + cache->key.offset);
8350
8351                 if (end - start >= range->minlen) {
8352                         if (!block_group_cache_done(cache)) {
8353                                 ret = cache_block_group(cache, 0);
8354                                 if (!ret)
8355                                         wait_block_group_cache_done(cache);
8356                         }
8357                         ret = btrfs_trim_block_group(cache,
8358                                                      &group_trimmed,
8359                                                      start,
8360                                                      end,
8361                                                      range->minlen);
8362
8363                         trimmed += group_trimmed;
8364                         if (ret) {
8365                                 btrfs_put_block_group(cache);
8366                                 break;
8367                         }
8368                 }
8369
8370                 cache = next_block_group(fs_info->tree_root, cache);
8371         }
8372
8373         range->len = trimmed;
8374         return ret;
8375 }