1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18
19 #include <linux/fs.h>
20 #include <linux/blkdev.h>
21 #include <linux/scatterlist.h>
22 #include <linux/swap.h>
23 #include <linux/radix-tree.h>
24 #include <linux/writeback.h>
25 #include <linux/buffer_head.h>
26 #include <linux/workqueue.h>
27 #include <linux/kthread.h>
28 #include <linux/freezer.h>
29 #include <linux/crc32c.h>
30 #include <linux/slab.h>
31 #include <linux/migrate.h>
32 #include <linux/ratelimit.h>
33 #include <asm/unaligned.h>
34 #include "compat.h"
35 #include "ctree.h"
36 #include "disk-io.h"
37 #include "transaction.h"
38 #include "btrfs_inode.h"
39 #include "volumes.h"
40 #include "print-tree.h"
41 #include "async-thread.h"
42 #include "locking.h"
43 #include "tree-log.h"
44 #include "free-space-cache.h"
45 #include "inode-map.h"
46 #include "check-integrity.h"
47 #include "rcu-string.h"
48
49 static struct extent_io_ops btree_extent_io_ops;
50 static void end_workqueue_fn(struct btrfs_work *work);
51 static void free_fs_root(struct btrfs_root *root);
52 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
53                                     int read_only);
54 static void btrfs_destroy_ordered_operations(struct btrfs_root *root);
55 static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
56 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
57                                       struct btrfs_root *root);
58 static void btrfs_destroy_pending_snapshots(struct btrfs_transaction *t);
59 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
60 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
61                                         struct extent_io_tree *dirty_pages,
62                                         int mark);
63 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
64                                        struct extent_io_tree *pinned_extents);
65
66 /*
67  * end_io_wq structs are used to do processing in task context when an IO is
68  * complete.  This is used during reads to verify checksums, and it is used
69  * by writes to insert metadata for new file extents after IO is complete.
70  */
71 struct end_io_wq {
72         struct bio *bio;
73         bio_end_io_t *end_io;
74         void *private;
75         struct btrfs_fs_info *info;
76         int error;
77         int metadata;
78         struct list_head list;
79         struct btrfs_work work;
80 };
81
82 /*
83  * async submit bios are used to offload expensive checksumming
84  * onto the worker threads.  They checksum file and metadata bios
85  * just before they are sent down the IO stack.
86  */
87 struct async_submit_bio {
88         struct inode *inode;
89         struct bio *bio;
90         struct list_head list;
91         extent_submit_bio_hook_t *submit_bio_start;
92         extent_submit_bio_hook_t *submit_bio_done;
93         int rw;
94         int mirror_num;
95         unsigned long bio_flags;
96         /*
97          * bio_offset is optional, can be used if the pages in the bio
98          * can't tell us where in the file the bio should go
99          */
100         u64 bio_offset;
101         struct btrfs_work work;
102         int error;
103 };
104
105 /*
106  * Lockdep class keys for extent_buffer->lock's in this root.  For a given
107  * eb, the lockdep key is determined by the btrfs_root it belongs to and
108  * the level the eb occupies in the tree.
109  *
110  * Different roots are used for different purposes and may nest inside each
111  * other and they require separate keysets.  As lockdep keys should be
112  * static, assign keysets according to the purpose of the root as indicated
113  * by btrfs_root->objectid.  This ensures that all special purpose roots
114  * have separate keysets.
115  *
116  * Lock-nesting across peer nodes is always done with the immediate parent
117  * node locked thus preventing deadlock.  As lockdep doesn't know this, use
118  * subclass to avoid triggering lockdep warning in such cases.
119  *
120  * The key is set by the readpage_end_io_hook after the buffer has passed
121  * csum validation but before the pages are unlocked.  It is also set by
122  * btrfs_init_new_buffer on freshly allocated blocks.
123  *
124  * We also add a check to make sure the highest level of the tree is the
125  * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
126  * needs update as well.
127  */
128 #ifdef CONFIG_DEBUG_LOCK_ALLOC
129 # if BTRFS_MAX_LEVEL != 8
130 #  error
131 # endif
132
133 static struct btrfs_lockdep_keyset {
134         u64                     id;             /* root objectid */
135         const char              *name_stem;     /* lock name stem */
136         char                    names[BTRFS_MAX_LEVEL + 1][20];
137         struct lock_class_key   keys[BTRFS_MAX_LEVEL + 1];
138 } btrfs_lockdep_keysets[] = {
139         { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"     },
140         { .id = BTRFS_EXTENT_TREE_OBJECTID,     .name_stem = "extent"   },
141         { .id = BTRFS_CHUNK_TREE_OBJECTID,      .name_stem = "chunk"    },
142         { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
143         { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
144         { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
145         { .id = BTRFS_ORPHAN_OBJECTID,          .name_stem = "orphan"   },
146         { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
147         { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
148         { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
149         { .id = 0,                              .name_stem = "tree"     },
150 };
151
152 void __init btrfs_init_lockdep(void)
153 {
154         int i, j;
155
156         /* initialize lockdep class names */
157         for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
158                 struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];
159
160                 for (j = 0; j < ARRAY_SIZE(ks->names); j++)
161                         snprintf(ks->names[j], sizeof(ks->names[j]),
162                                  "btrfs-%s-%02d", ks->name_stem, j);
163         }
164 }
165
166 void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
167                                     int level)
168 {
169         struct btrfs_lockdep_keyset *ks;
170
171         BUG_ON(level >= ARRAY_SIZE(ks->keys));
172
173         /* find the matching keyset, id 0 is the default entry */
174         for (ks = btrfs_lockdep_keysets; ks->id; ks++)
175                 if (ks->id == objectid)
176                         break;
177
178         lockdep_set_class_and_name(&eb->lock,
179                                    &ks->keys[level], ks->names[level]);
180 }
181
182 #endif
183
184 /*
185  * extents on the btree inode are pretty simple, there's one extent
186  * that covers the entire device
187  */
188 static struct extent_map *btree_get_extent(struct inode *inode,
189                 struct page *page, size_t pg_offset, u64 start, u64 len,
190                 int create)
191 {
192         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
193         struct extent_map *em;
194         int ret;
195
196         read_lock(&em_tree->lock);
197         em = lookup_extent_mapping(em_tree, start, len);
198         if (em) {
199                 em->bdev =
200                         BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
201                 read_unlock(&em_tree->lock);
202                 goto out;
203         }
204         read_unlock(&em_tree->lock);
205
206         em = alloc_extent_map();
207         if (!em) {
208                 em = ERR_PTR(-ENOMEM);
209                 goto out;
210         }
211         em->start = 0;
212         em->len = (u64)-1;
213         em->block_len = (u64)-1;
214         em->block_start = 0;
215         em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
216
217         write_lock(&em_tree->lock);
218         ret = add_extent_mapping(em_tree, em);
219         if (ret == -EEXIST) {
220                 u64 failed_start = em->start;
221                 u64 failed_len = em->len;
222
223                 free_extent_map(em);
224                 em = lookup_extent_mapping(em_tree, start, len);
225                 if (em) {
226                         ret = 0;
227                 } else {
228                         em = lookup_extent_mapping(em_tree, failed_start,
229                                                    failed_len);
230                         ret = -EIO;
231                 }
232         } else if (ret) {
233                 free_extent_map(em);
234                 em = NULL;
235         }
236         write_unlock(&em_tree->lock);
237
238         if (ret)
239                 em = ERR_PTR(ret);
240 out:
241         return em;
242 }
243
244 u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
245 {
246         return crc32c(seed, data, len);
247 }
248
249 void btrfs_csum_final(u32 crc, char *result)
250 {
251         put_unaligned_le32(~crc, result);
252 }
253
254 /*
255  * compute the csum for a btree block, and either verify it or write it
256  * into the csum field of the block.
257  */
258 static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
259                            int verify)
260 {
261         u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
262         char *result = NULL;
263         unsigned long len;
264         unsigned long cur_len;
265         unsigned long offset = BTRFS_CSUM_SIZE;
266         char *kaddr;
267         unsigned long map_start;
268         unsigned long map_len;
269         int err;
270         u32 crc = ~(u32)0;
271         unsigned long inline_result;
272
273         len = buf->len - offset;
274         while (len > 0) {
275                 err = map_private_extent_buffer(buf, offset, 32,
276                                         &kaddr, &map_start, &map_len);
277                 if (err)
278                         return 1;
279                 cur_len = min(len, map_len - (offset - map_start));
280                 crc = btrfs_csum_data(root, kaddr + offset - map_start,
281                                       crc, cur_len);
282                 len -= cur_len;
283                 offset += cur_len;
284         }
285         if (csum_size > sizeof(inline_result)) {
286                 result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
287                 if (!result)
288                         return 1;
289         } else {
290                 result = (char *)&inline_result;
291         }
292
293         btrfs_csum_final(crc, result);
294
295         if (verify) {
296                 if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
297                         u32 val;
298                         u32 found = 0;
299                         memcpy(&found, result, csum_size);
300
301                         read_extent_buffer(buf, &val, 0, csum_size);
302                         printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
303                                        "failed on %llu wanted %X found %X "
304                                        "level %d\n",
305                                        root->fs_info->sb->s_id,
306                                        (unsigned long long)buf->start, val, found,
307                                        btrfs_header_level(buf));
308                         if (result != (char *)&inline_result)
309                                 kfree(result);
310                         return 1;
311                 }
312         } else {
313                 write_extent_buffer(buf, result, 0, csum_size);
314         }
315         if (result != (char *)&inline_result)
316                 kfree(result);
317         return 0;
318 }
319
320 /*
321  * we can't consider a given block up to date unless the transid of the
322  * block matches the transid in the parent node's pointer.  This is how we
323  * detect blocks that either didn't get written at all or got written
324  * in the wrong place.
325  */
326 static int verify_parent_transid(struct extent_io_tree *io_tree,
327                                  struct extent_buffer *eb, u64 parent_transid,
328                                  int atomic)
329 {
330         struct extent_state *cached_state = NULL;
331         int ret;
332
333         if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
334                 return 0;
335
336         if (atomic)
337                 return -EAGAIN;
338
339         lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
340                          0, &cached_state);
341         if (extent_buffer_uptodate(eb) &&
342             btrfs_header_generation(eb) == parent_transid) {
343                 ret = 0;
344                 goto out;
345         }
346         printk_ratelimited("parent transid verify failed on %llu wanted %llu "
347                        "found %llu\n",
348                        (unsigned long long)eb->start,
349                        (unsigned long long)parent_transid,
350                        (unsigned long long)btrfs_header_generation(eb));
351         ret = 1;
352         clear_extent_buffer_uptodate(eb);
353 out:
354         unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
355                              &cached_state, GFP_NOFS);
356         return ret;
357 }
358
359 /*
360  * helper to read a given tree block, doing retries as required when
361  * the checksums don't match and we have alternate mirrors to try.
362  */
363 static int btree_read_extent_buffer_pages(struct btrfs_root *root,
364                                           struct extent_buffer *eb,
365                                           u64 start, u64 parent_transid)
366 {
367         struct extent_io_tree *io_tree;
368         int failed = 0;
369         int ret;
370         int num_copies = 0;
371         int mirror_num = 0;
372         int failed_mirror = 0;
373
374         clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
375         io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
376         while (1) {
377                 ret = read_extent_buffer_pages(io_tree, eb, start,
378                                                WAIT_COMPLETE,
379                                                btree_get_extent, mirror_num);
380                 if (!ret && !verify_parent_transid(io_tree, eb,
381                                                    parent_transid, 0))
382                         break;
383
384                 /*
385                  * This buffer's crc is fine, but its contents are corrupted, so
386                  * there is no reason to read the other copies, they won't be
387                  * any less wrong.
388                  */
389                 if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
390                         break;
391
392                 num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
393                                               eb->start, eb->len);
394                 if (num_copies == 1)
395                         break;
396
397                 if (!failed_mirror) {
398                         failed = 1;
399                         failed_mirror = eb->read_mirror;
400                 }
401
402                 mirror_num++;
403                 if (mirror_num == failed_mirror)
404                         mirror_num++;
405
406                 if (mirror_num > num_copies)
407                         break;
408         }
409
410         if (failed && !ret)
411                 repair_eb_io_failure(root, eb, failed_mirror);
412
413         return ret;
414 }
415
416 /*
417  * checksum a dirty tree block before IO.  This has extra checks to make sure
418  * we only fill in the checksum field in the first page of a multi-page block
419  */
420
421 static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
422 {
423         struct extent_io_tree *tree;
424         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
425         u64 found_start;
426         struct extent_buffer *eb;
427
428         tree = &BTRFS_I(page->mapping->host)->io_tree;
429
430         eb = (struct extent_buffer *)page->private;
431         if (page != eb->pages[0])
432                 return 0;
433         found_start = btrfs_header_bytenr(eb);
434         if (found_start != start) {
435                 WARN_ON(1);
436                 return 0;
437         }
438         if (eb->pages[0] != page) {
439                 WARN_ON(1);
440                 return 0;
441         }
442         if (!PageUptodate(page)) {
443                 WARN_ON(1);
444                 return 0;
445         }
446         csum_tree_block(root, eb, 0);
447         return 0;
448 }
449
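/*
 * Verify that the fsid in this tree block's header matches the current
 * filesystem (or one of its seed filesystems).  Returns 0 on a match,
 * nonzero otherwise.
 */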
450 static int check_tree_block_fsid(struct btrfs_root *root,
451                                  struct extent_buffer *eb)
452 {
453         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
454         u8 fsid[BTRFS_UUID_SIZE];
455         int ret = 1;
456
457         read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
458                            BTRFS_FSID_SIZE);
459         while (fs_devices) {
460                 if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
461                         ret = 0;
462                         break;
463                 }
464                 fs_devices = fs_devices->seed;
465         }
466         return ret;
467 }
468
469 #define CORRUPT(reason, eb, root, slot)                         \
470         printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu," \
471                "root=%llu, slot=%d\n", reason,                  \
472                (unsigned long long)btrfs_header_bytenr(eb),     \
473                (unsigned long long)root->objectid, slot)
474
475 static noinline int check_leaf(struct btrfs_root *root,
476                                struct extent_buffer *leaf)
477 {
478         struct btrfs_key key;
479         struct btrfs_key leaf_key;
480         u32 nritems = btrfs_header_nritems(leaf);
481         int slot;
482
483         if (nritems == 0)
484                 return 0;
485
486         /* Check the 0 item */
487         if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
488             BTRFS_LEAF_DATA_SIZE(root)) {
489                 CORRUPT("invalid item offset size pair", leaf, root, 0);
490                 return -EIO;
491         }
492
493         /*
494          * Check to make sure each item's keys are in the correct order and
495          * their offsets make sense.  We only have to loop through nritems-1
496          * because we check the current slot against the next slot, which
497          * verifies that the next slot's offset+size makes sense and that the
498          * current slot's offset is correct.
499          */
500         for (slot = 0; slot < nritems - 1; slot++) {
501                 btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
502                 btrfs_item_key_to_cpu(leaf, &key, slot + 1);
503
504                 /* Make sure the keys are in the right order */
505                 if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
506                         CORRUPT("bad key order", leaf, root, slot);
507                         return -EIO;
508                 }
509
510                 /*
511                  * Make sure the offsets and ends line up; remember that the
512                  * item data starts at the end of the leaf and grows towards the
513                  * front.
514                  */
515                 if (btrfs_item_offset_nr(leaf, slot) !=
516                         btrfs_item_end_nr(leaf, slot + 1)) {
517                         CORRUPT("slot offset bad", leaf, root, slot);
518                         return -EIO;
519                 }
520
521                 /*
522                  * Check to make sure that we don't point outside of the leaf,
523                  * just in case all the items are consistent with each other, but
524                  * all point outside of the leaf.
525                  */
526                 if (btrfs_item_end_nr(leaf, slot) >
527                     BTRFS_LEAF_DATA_SIZE(root)) {
528                         CORRUPT("slot end outside of leaf", leaf, root, slot);
529                         return -EIO;
530                 }
531         }
532
533         return 0;
534 }
535
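/*
 * Walk backwards from this page's offset, at most max_walk bytes, looking
 * for the extent buffer that contains the page.  A reference is held on
 * the returned buffer; NULL is returned if no covering buffer is found.
 */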
536 struct extent_buffer *find_eb_for_page(struct extent_io_tree *tree,
537                                        struct page *page, int max_walk)
538 {
539         struct extent_buffer *eb;
540         u64 start = page_offset(page);
541         u64 target = start;
542         u64 min_start;
543
544         if (start < max_walk)
545                 min_start = 0;
546         else
547                 min_start = start - max_walk;
548
549         while (start >= min_start) {
550                 eb = find_extent_buffer(tree, start, 0);
551                 if (eb) {
552                         /*
553                          * we found an extent buffer and it contains our page
554                          * hooray!
555                          */
556                         if (eb->start <= target &&
557                             eb->start + eb->len > target)
558                                 return eb;
559
560                         /* we found an extent buffer that wasn't for us */
561                         free_extent_buffer(eb);
562                         return NULL;
563                 }
564                 if (start == 0)
565                         break;
566                 start -= PAGE_CACHE_SIZE;
567         }
568         return NULL;
569 }
570
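/*
 * Read completion hook for btree pages.  Once the last outstanding page of
 * the buffer has completed, verify the header bytenr and fsid, check the
 * block checksum and, for leaves, run the sanity checks in check_leaf()
 * before marking the buffer uptodate.
 */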
571 static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
572                                struct extent_state *state, int mirror)
573 {
574         struct extent_io_tree *tree;
575         u64 found_start;
576         int found_level;
577         struct extent_buffer *eb;
578         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
579         int ret = 0;
580         int reads_done;
581
582         if (!page->private)
583                 goto out;
584
585         tree = &BTRFS_I(page->mapping->host)->io_tree;
586         eb = (struct extent_buffer *)page->private;
587
588         /* the pending IO might have been the only thing that kept this buffer
589          * in memory.  Make sure we have a ref for all these other checks
590          */
591         extent_buffer_get(eb);
592
593         reads_done = atomic_dec_and_test(&eb->io_pages);
594         if (!reads_done)
595                 goto err;
596
597         eb->read_mirror = mirror;
598         if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
599                 ret = -EIO;
600                 goto err;
601         }
602
603         found_start = btrfs_header_bytenr(eb);
604         if (found_start != eb->start) {
605                 printk_ratelimited(KERN_INFO "btrfs bad tree block start "
606                                "%llu %llu\n",
607                                (unsigned long long)found_start,
608                                (unsigned long long)eb->start);
609                 ret = -EIO;
610                 goto err;
611         }
612         if (check_tree_block_fsid(root, eb)) {
613                 printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
614                                (unsigned long long)eb->start);
615                 ret = -EIO;
616                 goto err;
617         }
618         found_level = btrfs_header_level(eb);
619
620         btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
621                                        eb, found_level);
622
623         ret = csum_tree_block(root, eb, 1);
624         if (ret) {
625                 ret = -EIO;
626                 goto err;
627         }
628
629         /*
630          * If this is a leaf block and it is corrupt, set the corrupt bit so
631          * that we don't try and read the other copies of this block, just
632          * return -EIO.
633          */
634         if (found_level == 0 && check_leaf(root, eb)) {
635                 set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
636                 ret = -EIO;
637         }
638
639         if (!ret)
640                 set_extent_buffer_uptodate(eb);
641 err:
642         if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
643                 clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
644                 btree_readahead_hook(root, eb, eb->start, ret);
645         }
646
647         if (ret)
648                 clear_extent_buffer_uptodate(eb);
649         free_extent_buffer(eb);
650 out:
651         return ret;
652 }
653
654 static int btree_io_failed_hook(struct page *page, int failed_mirror)
655 {
656         struct extent_buffer *eb;
657         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
658
659         eb = (struct extent_buffer *)page->private;
660         set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
661         eb->read_mirror = failed_mirror;
662         if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
663                 btree_readahead_hook(root, eb, eb->start, -EIO);
664         return -EIO;    /* we fixed nothing */
665 }
666
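/*
 * bio end_io callback: route the completion to the matching end_io worker
 * (data, metadata or free space cache) so the real work can run in task
 * context via end_workqueue_fn.
 */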
667 static void end_workqueue_bio(struct bio *bio, int err)
668 {
669         struct end_io_wq *end_io_wq = bio->bi_private;
670         struct btrfs_fs_info *fs_info;
671
672         fs_info = end_io_wq->info;
673         end_io_wq->error = err;
674         end_io_wq->work.func = end_workqueue_fn;
675         end_io_wq->work.flags = 0;
676
677         if (bio->bi_rw & REQ_WRITE) {
678                 if (end_io_wq->metadata == 1)
679                         btrfs_queue_worker(&fs_info->endio_meta_write_workers,
680                                            &end_io_wq->work);
681                 else if (end_io_wq->metadata == 2)
682                         btrfs_queue_worker(&fs_info->endio_freespace_worker,
683                                            &end_io_wq->work);
684                 else
685                         btrfs_queue_worker(&fs_info->endio_write_workers,
686                                            &end_io_wq->work);
687         } else {
688                 if (end_io_wq->metadata)
689                         btrfs_queue_worker(&fs_info->endio_meta_workers,
690                                            &end_io_wq->work);
691                 else
692                         btrfs_queue_worker(&fs_info->endio_workers,
693                                            &end_io_wq->work);
694         }
695 }
696
697 /*
698  * For the metadata arg you want
699  *
700  * 0 - if data
701  * 1 - if normal metadata
702  * 2 - if writing to the free space cache area
703  */
704 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
705                         int metadata)
706 {
707         struct end_io_wq *end_io_wq;
708         end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
709         if (!end_io_wq)
710                 return -ENOMEM;
711
712         end_io_wq->private = bio->bi_private;
713         end_io_wq->end_io = bio->bi_end_io;
714         end_io_wq->info = info;
715         end_io_wq->error = 0;
716         end_io_wq->bio = bio;
717         end_io_wq->metadata = metadata;
718
719         bio->bi_private = end_io_wq;
720         bio->bi_end_io = end_workqueue_bio;
721         return 0;
722 }
723
724 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
725 {
726         unsigned long limit = min_t(unsigned long,
727                                     info->workers.max_workers,
728                                     info->fs_devices->open_devices);
729         return 256 * limit;
730 }
731
732 static void run_one_async_start(struct btrfs_work *work)
733 {
734         struct async_submit_bio *async;
735         int ret;
736
737         async = container_of(work, struct  async_submit_bio, work);
738         ret = async->submit_bio_start(async->inode, async->rw, async->bio,
739                                       async->mirror_num, async->bio_flags,
740                                       async->bio_offset);
741         if (ret)
742                 async->error = ret;
743 }
744
745 static void run_one_async_done(struct btrfs_work *work)
746 {
747         struct btrfs_fs_info *fs_info;
748         struct async_submit_bio *async;
749         int limit;
750
751         async = container_of(work, struct  async_submit_bio, work);
752         fs_info = BTRFS_I(async->inode)->root->fs_info;
753
754         limit = btrfs_async_submit_limit(fs_info);
755         limit = limit * 2 / 3;
756
757         atomic_dec(&fs_info->nr_async_submits);
758
759         if (atomic_read(&fs_info->nr_async_submits) < limit &&
760             waitqueue_active(&fs_info->async_submit_wait))
761                 wake_up(&fs_info->async_submit_wait);
762
763         /* If an error occurred we just want to clean up the bio and move on */
764         if (async->error) {
765                 bio_endio(async->bio, async->error);
766                 return;
767         }
768
769         async->submit_bio_done(async->inode, async->rw, async->bio,
770                                async->mirror_num, async->bio_flags,
771                                async->bio_offset);
772 }
773
774 static void run_one_async_free(struct btrfs_work *work)
775 {
776         struct async_submit_bio *async;
777
778         async = container_of(work, struct  async_submit_bio, work);
779         kfree(async);
780 }
781
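/*
 * Queue a bio for async submission.  A worker thread runs submit_bio_start
 * first (typically checksumming) and then submit_bio_done to actually send
 * the bio down the stack; REQ_SYNC bios are queued at high priority.
 */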
782 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
783                         int rw, struct bio *bio, int mirror_num,
784                         unsigned long bio_flags,
785                         u64 bio_offset,
786                         extent_submit_bio_hook_t *submit_bio_start,
787                         extent_submit_bio_hook_t *submit_bio_done)
788 {
789         struct async_submit_bio *async;
790
791         async = kmalloc(sizeof(*async), GFP_NOFS);
792         if (!async)
793                 return -ENOMEM;
794
795         async->inode = inode;
796         async->rw = rw;
797         async->bio = bio;
798         async->mirror_num = mirror_num;
799         async->submit_bio_start = submit_bio_start;
800         async->submit_bio_done = submit_bio_done;
801
802         async->work.func = run_one_async_start;
803         async->work.ordered_func = run_one_async_done;
804         async->work.ordered_free = run_one_async_free;
805
806         async->work.flags = 0;
807         async->bio_flags = bio_flags;
808         async->bio_offset = bio_offset;
809
810         async->error = 0;
811
812         atomic_inc(&fs_info->nr_async_submits);
813
814         if (rw & REQ_SYNC)
815                 btrfs_set_work_high_prio(&async->work);
816
817         btrfs_queue_worker(&fs_info->workers, &async->work);
818
819         while (atomic_read(&fs_info->async_submit_draining) &&
820               atomic_read(&fs_info->nr_async_submits)) {
821                 wait_event(fs_info->async_submit_wait,
822                            (atomic_read(&fs_info->nr_async_submits) == 0));
823         }
824
825         return 0;
826 }
827
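/* compute and store the csum for every metadata page attached to this bio */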
828 static int btree_csum_one_bio(struct bio *bio)
829 {
830         struct bio_vec *bvec = bio->bi_io_vec;
831         int bio_index = 0;
832         struct btrfs_root *root;
833         int ret = 0;
834
835         WARN_ON(bio->bi_vcnt <= 0);
836         while (bio_index < bio->bi_vcnt) {
837                 root = BTRFS_I(bvec->bv_page->mapping->host)->root;
838                 ret = csum_dirty_buffer(root, bvec->bv_page);
839                 if (ret)
840                         break;
841                 bio_index++;
842                 bvec++;
843         }
844         return ret;
845 }
846
847 static int __btree_submit_bio_start(struct inode *inode, int rw,
848                                     struct bio *bio, int mirror_num,
849                                     unsigned long bio_flags,
850                                     u64 bio_offset)
851 {
852         /*
853          * when we're called for a write, we're already in the async
854          * submission context.  Just checksum the bio contents here
855          */
856         return btree_csum_one_bio(bio);
857 }
858
859 static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
860                                  int mirror_num, unsigned long bio_flags,
861                                  u64 bio_offset)
862 {
863         /*
864          * when we're called for a write, we're already in the async
865          * submission context.  Just jump into btrfs_map_bio
866          */
867         return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
868 }
869
870 static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
871                                  int mirror_num, unsigned long bio_flags,
872                                  u64 bio_offset)
873 {
874         int ret;
875
876         if (!(rw & REQ_WRITE)) {
877
878                 /*
879                  * called for a read, do the setup so that checksum validation
880                  * can happen in the async kernel threads
881                  */
882                 ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
883                                           bio, 1);
884                 if (ret)
885                         return ret;
886                 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
887                                      mirror_num, 0);
888         }
889
890         /*
891          * kthread helpers are used to submit writes so that checksumming
892          * can happen in parallel across all CPUs
893          */
894         return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
895                                    inode, rw, bio, mirror_num, 0,
896                                    bio_offset,
897                                    __btree_submit_bio_start,
898                                    __btree_submit_bio_done);
899 }
900
901 #ifdef CONFIG_MIGRATION
902 static int btree_migratepage(struct address_space *mapping,
903                         struct page *newpage, struct page *page,
904                         enum migrate_mode mode)
905 {
906         /*
907          * we can't safely write a btree page from here,
908          * we haven't done the locking hook
909          */
910         if (PageDirty(page))
911                 return -EAGAIN;
912         /*
913          * Buffers may be managed in a filesystem specific way.
914          * We must have no buffers or drop them.
915          */
916         if (page_has_private(page) &&
917             !try_to_release_page(page, GFP_KERNEL))
918                 return -EAGAIN;
919         return migrate_page(mapping, newpage, page, mode);
920 }
921 #endif
922
923
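/*
 * Write out dirty btree pages.  Background (WB_SYNC_NONE) writeback is
 * skipped while less than ~32MB of metadata is dirty.
 */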
924 static int btree_writepages(struct address_space *mapping,
925                             struct writeback_control *wbc)
926 {
927         struct extent_io_tree *tree;
928         tree = &BTRFS_I(mapping->host)->io_tree;
929         if (wbc->sync_mode == WB_SYNC_NONE) {
930                 struct btrfs_root *root = BTRFS_I(mapping->host)->root;
931                 u64 num_dirty;
932                 unsigned long thresh = 32 * 1024 * 1024;
933
934                 if (wbc->for_kupdate)
935                         return 0;
936
937                 /* this is a bit racy, but that's ok */
938                 num_dirty = root->fs_info->dirty_metadata_bytes;
939                 if (num_dirty < thresh)
940                         return 0;
941         }
942         return btree_write_cache_pages(mapping, wbc);
943 }
944
945 static int btree_readpage(struct file *file, struct page *page)
946 {
947         struct extent_io_tree *tree;
948         tree = &BTRFS_I(page->mapping->host)->io_tree;
949         return extent_read_full_page(tree, page, btree_get_extent, 0);
950 }
951
952 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
953 {
954         if (PageWriteback(page) || PageDirty(page))
955                 return 0;
956         /*
957          * We need to mask out e.g. __GFP_HIGHMEM and __GFP_DMA32 as we're doing
958          * slab allocation from alloc_extent_state down the callchain where
959          * it'd hit a BUG_ON as those flags are not allowed.
960          */
961         gfp_flags &= ~GFP_SLAB_BUG_MASK;
962
963         return try_release_extent_buffer(page, gfp_flags);
964 }
965
966 static void btree_invalidatepage(struct page *page, unsigned long offset)
967 {
968         struct extent_io_tree *tree;
969         tree = &BTRFS_I(page->mapping->host)->io_tree;
970         extent_invalidatepage(tree, page, offset);
971         btree_releasepage(page, GFP_NOFS);
972         if (PagePrivate(page)) {
973                 printk(KERN_WARNING "btrfs warning page private not zero "
974                        "on page %llu\n", (unsigned long long)page_offset(page));
975                 ClearPagePrivate(page);
976                 set_page_private(page, 0);
977                 page_cache_release(page);
978         }
979 }
980
981 static int btree_set_page_dirty(struct page *page)
982 {
983         struct extent_buffer *eb;
984
985         BUG_ON(!PagePrivate(page));
986         eb = (struct extent_buffer *)page->private;
987         BUG_ON(!eb);
988         BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
989         BUG_ON(!atomic_read(&eb->refs));
990         btrfs_assert_tree_locked(eb);
991         return __set_page_dirty_nobuffers(page);
992 }
993
994 static const struct address_space_operations btree_aops = {
995         .readpage       = btree_readpage,
996         .writepages     = btree_writepages,
997         .releasepage    = btree_releasepage,
998         .invalidatepage = btree_invalidatepage,
999 #ifdef CONFIG_MIGRATION
1000         .migratepage    = btree_migratepage,
1001 #endif
1002         .set_page_dirty = btree_set_page_dirty,
1003 };
1004
1005 int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
1006                          u64 parent_transid)
1007 {
1008         struct extent_buffer *buf = NULL;
1009         struct inode *btree_inode = root->fs_info->btree_inode;
1010         int ret = 0;
1011
1012         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1013         if (!buf)
1014                 return 0;
1015         read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
1016                                  buf, 0, WAIT_NONE, btree_get_extent, 0);
1017         free_extent_buffer(buf);
1018         return ret;
1019 }
1020
1021 int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
1022                          int mirror_num, struct extent_buffer **eb)
1023 {
1024         struct extent_buffer *buf = NULL;
1025         struct inode *btree_inode = root->fs_info->btree_inode;
1026         struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
1027         int ret;
1028
1029         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1030         if (!buf)
1031                 return 0;
1032
1033         set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
1034
1035         ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
1036                                        btree_get_extent, mirror_num);
1037         if (ret) {
1038                 free_extent_buffer(buf);
1039                 return ret;
1040         }
1041
1042         if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
1043                 free_extent_buffer(buf);
1044                 return -EIO;
1045         } else if (extent_buffer_uptodate(buf)) {
1046                 *eb = buf;
1047         } else {
1048                 free_extent_buffer(buf);
1049         }
1050         return 0;
1051 }
1052
1053 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
1054                                             u64 bytenr, u32 blocksize)
1055 {
1056         struct inode *btree_inode = root->fs_info->btree_inode;
1057         struct extent_buffer *eb;
1058         eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
1059                                 bytenr, blocksize);
1060         return eb;
1061 }
1062
1063 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
1064                                                  u64 bytenr, u32 blocksize)
1065 {
1066         struct inode *btree_inode = root->fs_info->btree_inode;
1067         struct extent_buffer *eb;
1068
1069         eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
1070                                  bytenr, blocksize);
1071         return eb;
1072 }
1073
1074
1075 int btrfs_write_tree_block(struct extent_buffer *buf)
1076 {
1077         return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
1078                                         buf->start + buf->len - 1);
1079 }
1080
1081 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
1082 {
1083         return filemap_fdatawait_range(buf->pages[0]->mapping,
1084                                        buf->start, buf->start + buf->len - 1);
1085 }
1086
1087 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
1088                                       u32 blocksize, u64 parent_transid)
1089 {
1090         struct extent_buffer *buf = NULL;
1091         int ret;
1092
1093         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1094         if (!buf)
1095                 return NULL;
1096
1097         ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
1098         return buf;
1099
1100 }
1101
1102 void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1103                       struct extent_buffer *buf)
1104 {
1105         if (btrfs_header_generation(buf) ==
1106             root->fs_info->running_transaction->transid) {
1107                 btrfs_assert_tree_locked(buf);
1108
1109                 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
1110                         spin_lock(&root->fs_info->delalloc_lock);
1111                         if (root->fs_info->dirty_metadata_bytes >= buf->len)
1112                                 root->fs_info->dirty_metadata_bytes -= buf->len;
1113                         else {
1114                                 spin_unlock(&root->fs_info->delalloc_lock);
1115                                 btrfs_panic(root->fs_info, -EOVERFLOW,
1116                                           "Can't clear %lu bytes from "
1117                                           "dirty_metadata_bytes (%lu)",
1118                                           buf->len,
1119                                           root->fs_info->dirty_metadata_bytes);
1120                         }
1121                         spin_unlock(&root->fs_info->delalloc_lock);
1122                 }
1123
1124                 /* ugh, clear_extent_buffer_dirty needs to lock the page */
1125                 btrfs_set_lock_blocking(buf);
1126                 clear_extent_buffer_dirty(buf);
1127         }
1128 }
1129
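/*
 * Initialize the in-memory state of a btrfs_root (locks, lists, counters
 * and default values).  The caller fills in the root item and tree node.
 */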
1130 static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
1131                          u32 stripesize, struct btrfs_root *root,
1132                          struct btrfs_fs_info *fs_info,
1133                          u64 objectid)
1134 {
1135         root->node = NULL;
1136         root->commit_root = NULL;
1137         root->sectorsize = sectorsize;
1138         root->nodesize = nodesize;
1139         root->leafsize = leafsize;
1140         root->stripesize = stripesize;
1141         root->ref_cows = 0;
1142         root->track_dirty = 0;
1143         root->in_radix = 0;
1144         root->orphan_item_inserted = 0;
1145         root->orphan_cleanup_state = 0;
1146
1147         root->objectid = objectid;
1148         root->last_trans = 0;
1149         root->highest_objectid = 0;
1150         root->name = NULL;
1151         root->inode_tree = RB_ROOT;
1152         INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
1153         root->block_rsv = NULL;
1154         root->orphan_block_rsv = NULL;
1155
1156         INIT_LIST_HEAD(&root->dirty_list);
1157         INIT_LIST_HEAD(&root->root_list);
1158         spin_lock_init(&root->orphan_lock);
1159         spin_lock_init(&root->inode_lock);
1160         spin_lock_init(&root->accounting_lock);
1161         mutex_init(&root->objectid_mutex);
1162         mutex_init(&root->log_mutex);
1163         init_waitqueue_head(&root->log_writer_wait);
1164         init_waitqueue_head(&root->log_commit_wait[0]);
1165         init_waitqueue_head(&root->log_commit_wait[1]);
1166         atomic_set(&root->log_commit[0], 0);
1167         atomic_set(&root->log_commit[1], 0);
1168         atomic_set(&root->log_writers, 0);
1169         atomic_set(&root->orphan_inodes, 0);
1170         root->log_batch = 0;
1171         root->log_transid = 0;
1172         root->last_log_commit = 0;
1173         extent_io_tree_init(&root->dirty_log_pages,
1174                              fs_info->btree_inode->i_mapping);
1175
1176         memset(&root->root_key, 0, sizeof(root->root_key));
1177         memset(&root->root_item, 0, sizeof(root->root_item));
1178         memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
1179         memset(&root->root_kobj, 0, sizeof(root->root_kobj));
1180         root->defrag_trans_start = fs_info->generation;
1181         init_completion(&root->kobj_unregister);
1182         root->defrag_running = 0;
1183         root->root_key.objectid = objectid;
1184         root->anon_dev = 0;
1185 }
1186
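/*
 * Find the root item for @objectid in the tree root and read the
 * corresponding tree block into @root.
 */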
1187 static int __must_check find_and_setup_root(struct btrfs_root *tree_root,
1188                                             struct btrfs_fs_info *fs_info,
1189                                             u64 objectid,
1190                                             struct btrfs_root *root)
1191 {
1192         int ret;
1193         u32 blocksize;
1194         u64 generation;
1195
1196         __setup_root(tree_root->nodesize, tree_root->leafsize,
1197                      tree_root->sectorsize, tree_root->stripesize,
1198                      root, fs_info, objectid);
1199         ret = btrfs_find_last_root(tree_root, objectid,
1200                                    &root->root_item, &root->root_key);
1201         if (ret > 0)
1202                 return -ENOENT;
1203         else if (ret < 0)
1204                 return ret;
1205
1206         generation = btrfs_root_generation(&root->root_item);
1207         blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1208         root->commit_root = NULL;
1209         root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1210                                      blocksize, generation);
1211         if (!root->node || !btrfs_buffer_uptodate(root->node, generation, 0)) {
1212                 free_extent_buffer(root->node);
1213                 root->node = NULL;
1214                 return -EIO;
1215         }
1216         root->commit_root = btrfs_root_node(root);
1217         return 0;
1218 }
1219
1220 static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
1221 {
1222         struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);
1223         if (root)
1224                 root->fs_info = fs_info;
1225         return root;
1226 }
1227
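/* allocate a log tree root and its initial empty leaf block */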
1228 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
1229                                          struct btrfs_fs_info *fs_info)
1230 {
1231         struct btrfs_root *root;
1232         struct btrfs_root *tree_root = fs_info->tree_root;
1233         struct extent_buffer *leaf;
1234
1235         root = btrfs_alloc_root(fs_info);
1236         if (!root)
1237                 return ERR_PTR(-ENOMEM);
1238
1239         __setup_root(tree_root->nodesize, tree_root->leafsize,
1240                      tree_root->sectorsize, tree_root->stripesize,
1241                      root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1242
1243         root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
1244         root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1245         root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
1246         /*
1247          * log trees do not get reference counted because they go away
1248          * before a real commit is actually done.  They do store pointers
1249          * to file data extents, and those reference counts still get
1250          * updated (along with back refs to the log tree).
1251          */
1252         root->ref_cows = 0;
1253
1254         leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
1255                                       BTRFS_TREE_LOG_OBJECTID, NULL,
1256                                       0, 0, 0);
1257         if (IS_ERR(leaf)) {
1258                 kfree(root);
1259                 return ERR_CAST(leaf);
1260         }
1261
1262         memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1263         btrfs_set_header_bytenr(leaf, leaf->start);
1264         btrfs_set_header_generation(leaf, trans->transid);
1265         btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1266         btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
1267         root->node = leaf;
1268
1269         write_extent_buffer(root->node, root->fs_info->fsid,
1270                             (unsigned long)btrfs_header_fsid(root->node),
1271                             BTRFS_FSID_SIZE);
1272         btrfs_mark_buffer_dirty(root->node);
1273         btrfs_tree_unlock(root->node);
1274         return root;
1275 }
1276
1277 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
1278                              struct btrfs_fs_info *fs_info)
1279 {
1280         struct btrfs_root *log_root;
1281
1282         log_root = alloc_log_tree(trans, fs_info);
1283         if (IS_ERR(log_root))
1284                 return PTR_ERR(log_root);
1285         WARN_ON(fs_info->log_root_tree);
1286         fs_info->log_root_tree = log_root;
1287         return 0;
1288 }
1289
1290 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1291                        struct btrfs_root *root)
1292 {
1293         struct btrfs_root *log_root;
1294         struct btrfs_inode_item *inode_item;
1295
1296         log_root = alloc_log_tree(trans, root->fs_info);
1297         if (IS_ERR(log_root))
1298                 return PTR_ERR(log_root);
1299
1300         log_root->last_trans = trans->transid;
1301         log_root->root_key.offset = root->root_key.objectid;
1302
1303         inode_item = &log_root->root_item.inode;
1304         inode_item->generation = cpu_to_le64(1);
1305         inode_item->size = cpu_to_le64(3);
1306         inode_item->nlink = cpu_to_le32(1);
1307         inode_item->nbytes = cpu_to_le64(root->leafsize);
1308         inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
1309
1310         btrfs_set_root_node(&log_root->root_item, log_root->node);
1311
1312         WARN_ON(root->log_root);
1313         root->log_root = log_root;
1314         root->log_transid = 0;
1315         root->last_log_commit = 0;
1316         return 0;
1317 }
1318
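/*
 * Read a root from disk given its key, bypassing the fs_roots_radix cache.
 * The special offset (u64)-1 means the latest root item is looked up with
 * find_and_setup_root(); otherwise the key is searched in the tree root.
 */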
1319 struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
1320                                                struct btrfs_key *location)
1321 {
1322         struct btrfs_root *root;
1323         struct btrfs_fs_info *fs_info = tree_root->fs_info;
1324         struct btrfs_path *path;
1325         struct extent_buffer *l;
1326         u64 generation;
1327         u32 blocksize;
1328         int ret = 0;
1329
1330         root = btrfs_alloc_root(fs_info);
1331         if (!root)
1332                 return ERR_PTR(-ENOMEM);
1333         if (location->offset == (u64)-1) {
1334                 ret = find_and_setup_root(tree_root, fs_info,
1335                                           location->objectid, root);
1336                 if (ret) {
1337                         kfree(root);
1338                         return ERR_PTR(ret);
1339                 }
1340                 goto out;
1341         }
1342
1343         __setup_root(tree_root->nodesize, tree_root->leafsize,
1344                      tree_root->sectorsize, tree_root->stripesize,
1345                      root, fs_info, location->objectid);
1346
1347         path = btrfs_alloc_path();
1348         if (!path) {
1349                 kfree(root);
1350                 return ERR_PTR(-ENOMEM);
1351         }
1352         ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
1353         if (ret == 0) {
1354                 l = path->nodes[0];
1355                 read_extent_buffer(l, &root->root_item,
1356                                 btrfs_item_ptr_offset(l, path->slots[0]),
1357                                 sizeof(root->root_item));
1358                 memcpy(&root->root_key, location, sizeof(*location));
1359         }
1360         btrfs_free_path(path);
1361         if (ret) {
1362                 kfree(root);
1363                 if (ret > 0)
1364                         ret = -ENOENT;
1365                 return ERR_PTR(ret);
1366         }
1367
1368         generation = btrfs_root_generation(&root->root_item);
1369         blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1370         root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1371                                      blocksize, generation);
1372         root->commit_root = btrfs_root_node(root);
1373         BUG_ON(!root->node); /* -ENOMEM */
1374 out:
1375         if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
1376                 root->ref_cows = 1;
1377                 btrfs_check_and_init_root_item(&root->root_item);
1378         }
1379
1380         return root;
1381 }
1382
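/*
 * Look up the root for @location.  The well-known trees are returned
 * directly, cached subvolume roots come from fs_roots_radix, and anything
 * else is read from disk, set up (free inode cache, anonymous bdev) and
 * inserted into the radix tree.
 */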
1383 struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
1384                                               struct btrfs_key *location)
1385 {
1386         struct btrfs_root *root;
1387         int ret;
1388
1389         if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1390                 return fs_info->tree_root;
1391         if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1392                 return fs_info->extent_root;
1393         if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1394                 return fs_info->chunk_root;
1395         if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1396                 return fs_info->dev_root;
1397         if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1398                 return fs_info->csum_root;
1399 again:
1400         spin_lock(&fs_info->fs_roots_radix_lock);
1401         root = radix_tree_lookup(&fs_info->fs_roots_radix,
1402                                  (unsigned long)location->objectid);
1403         spin_unlock(&fs_info->fs_roots_radix_lock);
1404         if (root)
1405                 return root;
1406
1407         root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
1408         if (IS_ERR(root))
1409                 return root;
1410
1411         root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1412         root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1413                                         GFP_NOFS);
1414         if (!root->free_ino_pinned || !root->free_ino_ctl) {
1415                 ret = -ENOMEM;
1416                 goto fail;
1417         }
1418
1419         btrfs_init_free_ino_ctl(root);
1420         mutex_init(&root->fs_commit_mutex);
1421         spin_lock_init(&root->cache_lock);
1422         init_waitqueue_head(&root->cache_wait);
1423
1424         ret = get_anon_bdev(&root->anon_dev);
1425         if (ret)
1426                 goto fail;
1427
1428         if (btrfs_root_refs(&root->root_item) == 0) {
1429                 ret = -ENOENT;
1430                 goto fail;
1431         }
1432
1433         ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
1434         if (ret < 0)
1435                 goto fail;
1436         if (ret == 0)
1437                 root->orphan_item_inserted = 1;
1438
1439         ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
1440         if (ret)
1441                 goto fail;
1442
1443         spin_lock(&fs_info->fs_roots_radix_lock);
1444         ret = radix_tree_insert(&fs_info->fs_roots_radix,
1445                                 (unsigned long)root->root_key.objectid,
1446                                 root);
1447         if (ret == 0)
1448                 root->in_radix = 1;
1449
1450         spin_unlock(&fs_info->fs_roots_radix_lock);
1451         radix_tree_preload_end();
1452         if (ret) {
1453                 if (ret == -EEXIST) {
1454                         free_fs_root(root);
1455                         goto again;
1456                 }
1457                 goto fail;
1458         }
1459
1460         ret = btrfs_find_dead_roots(fs_info->tree_root,
1461                                     root->root_key.objectid);
1462         WARN_ON(ret);
1463         return root;
1464 fail:
1465         free_fs_root(root);
1466         return ERR_PTR(ret);
1467 }
1468
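/*
 * the per-fs bdi is considered congested if any of the underlying devices'
 * backing_dev_infos report congestion for the requested bdi_bits.
 */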
1469 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1470 {
1471         struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1472         int ret = 0;
1473         struct btrfs_device *device;
1474         struct backing_dev_info *bdi;
1475
1476         rcu_read_lock();
1477         list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
1478                 if (!device->bdev)
1479                         continue;
1480                 bdi = blk_get_backing_dev_info(device->bdev);
1481                 if (bdi && bdi_congested(bdi, bdi_bits)) {
1482                         ret = 1;
1483                         break;
1484                 }
1485         }
1486         rcu_read_unlock();
1487         return ret;
1488 }
1489
1490 /*
1491  * If this fails, caller must call bdi_destroy() to get rid of the
1492  * bdi again.
1493  */
1494 static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1495 {
1496         int err;
1497
1498         bdi->capabilities = BDI_CAP_MAP_COPY;
1499         err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
1500         if (err)
1501                 return err;
1502
1503         bdi->ra_pages   = default_backing_dev_info.ra_pages;
1504         bdi->congested_fn       = btrfs_congested_fn;
1505         bdi->congested_data     = info;
1506         return 0;
1507 }
1508
1509 /*
1510  * called by the kthread helper functions to finally call the bio end_io
1511  * functions.  This is where read checksum verification actually happens
1512  */
1513 static void end_workqueue_fn(struct btrfs_work *work)
1514 {
1515         struct bio *bio;
1516         struct end_io_wq *end_io_wq;
1517         struct btrfs_fs_info *fs_info;
1518         int error;
1519
1520         end_io_wq = container_of(work, struct end_io_wq, work);
1521         bio = end_io_wq->bio;
1522         fs_info = end_io_wq->info;
1523
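        /*
         * restore the end_io and private pointers that were saved when the
         * end_io_wq was attached to the bio at submit time, then complete the
         * bio with the recorded error.
         */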
1524         error = end_io_wq->error;
1525         bio->bi_private = end_io_wq->private;
1526         bio->bi_end_io = end_io_wq->end_io;
1527         kfree(end_io_wq);
1528         bio_endio(bio, error);
1529 }
1530
1531 static int cleaner_kthread(void *arg)
1532 {
1533         struct btrfs_root *root = arg;
1534
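        /*
         * run delayed iputs, clean up old snapshots and kick off inode defrag
         * whenever the fs is writable and the cleaner_mutex can be taken
         * without blocking; otherwise just sleep until the transaction
         * kthread (or kthread_stop) wakes us up.
         */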
1535         do {
1536                 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1537
1538                 if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
1539                     mutex_trylock(&root->fs_info->cleaner_mutex)) {
1540                         btrfs_run_delayed_iputs(root);
1541                         btrfs_clean_old_snapshots(root);
1542                         mutex_unlock(&root->fs_info->cleaner_mutex);
1543                         btrfs_run_defrag_inodes(root->fs_info);
1544                 }
1545
1546                 if (!try_to_freeze()) {
1547                         set_current_state(TASK_INTERRUPTIBLE);
1548                         if (!kthread_should_stop())
1549                                 schedule();
1550                         __set_current_state(TASK_RUNNING);
1551                 }
1552         } while (!kthread_should_stop());
1553         return 0;
1554 }
1555
1556 static int transaction_kthread(void *arg)
1557 {
1558         struct btrfs_root *root = arg;
1559         struct btrfs_trans_handle *trans;
1560         struct btrfs_transaction *cur;
1561         u64 transid;
1562         unsigned long now;
1563         unsigned long delay;
1564         bool cannot_commit;
1565
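        /*
         * commit the running transaction once it is blocked or at least 30
         * seconds old; otherwise check again in about 5 seconds.  The cleaner
         * kthread is woken after every pass.
         */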
1566         do {
1567                 cannot_commit = false;
1568                 delay = HZ * 30;
1569                 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1570                 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1571
1572                 spin_lock(&root->fs_info->trans_lock);
1573                 cur = root->fs_info->running_transaction;
1574                 if (!cur) {
1575                         spin_unlock(&root->fs_info->trans_lock);
1576                         goto sleep;
1577                 }
1578
1579                 now = get_seconds();
1580                 if (!cur->blocked &&
1581                     (now < cur->start_time || now - cur->start_time < 30)) {
1582                         spin_unlock(&root->fs_info->trans_lock);
1583                         delay = HZ * 5;
1584                         goto sleep;
1585                 }
1586                 transid = cur->transid;
1587                 spin_unlock(&root->fs_info->trans_lock);
1588
1589                 /* If the file system is aborted, this will always fail. */
1590                 trans = btrfs_join_transaction(root);
1591                 if (IS_ERR(trans)) {
1592                         cannot_commit = true;
1593                         goto sleep;
1594                 }
1595                 if (transid == trans->transid) {
1596                         btrfs_commit_transaction(trans, root);
1597                 } else {
1598                         btrfs_end_transaction(trans, root);
1599                 }
1600 sleep:
1601                 wake_up_process(root->fs_info->cleaner_kthread);
1602                 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1603
1604                 if (!try_to_freeze()) {
1605                         set_current_state(TASK_INTERRUPTIBLE);
1606                         if (!kthread_should_stop() &&
1607                             (!btrfs_transaction_blocked(root->fs_info) ||
1608                              cannot_commit))
1609                                 schedule_timeout(delay);
1610                         __set_current_state(TASK_RUNNING);
1611                 }
1612         } while (!kthread_should_stop());
1613         return 0;
1614 }
1615
1616 /*
1617  * this will find the highest generation in the array of
1618  * root backups.  The index of the newest entry is returned,
1619  * or -1 if we can't find anything.
1620  *
1621  * We check to make sure the array is valid by comparing the
1622  * generation of the latest root in the array with the generation
1623  * in the super block.  If they don't match we pitch it.
1624  */
1625 static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
1626 {
1627         u64 cur;
1628         int newest_index = -1;
1629         struct btrfs_root_backup *root_backup;
1630         int i;
1631
1632         for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1633                 root_backup = info->super_copy->super_roots + i;
1634                 cur = btrfs_backup_tree_root_gen(root_backup);
1635                 if (cur == newest_gen)
1636                         newest_index = i;
1637         }
1638
1639         /* check to see if we actually wrapped around */
1640         if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
1641                 root_backup = info->super_copy->super_roots;
1642                 cur = btrfs_backup_tree_root_gen(root_backup);
1643                 if (cur == newest_gen)
1644                         newest_index = 0;
1645         }
1646         return newest_index;
1647 }
1648
1649
1650 /*
1651  * find the oldest backup so we know where to store new entries
1652  * in the backup array.  This will set the backup_root_index
1653  * field in the fs_info struct
1654  */
1655 static void find_oldest_super_backup(struct btrfs_fs_info *info,
1656                                      u64 newest_gen)
1657 {
1658         int newest_index = -1;
1659
1660         newest_index = find_newest_super_backup(info, newest_gen);
1661         /* if there was garbage in there, just move along */
1662         if (newest_index == -1) {
1663                 info->backup_root_index = 0;
1664         } else {
1665                 info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
1666         }
1667 }
1668
1669 /*
1670  * copy all the root pointers into the super backup array.
1671  * this will bump the backup pointer by one when it is
1672  * done
1673  */
1674 static void backup_super_roots(struct btrfs_fs_info *info)
1675 {
1676         int next_backup;
1677         struct btrfs_root_backup *root_backup;
1678         int last_backup;
1679
1680         next_backup = info->backup_root_index;
1681         last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
1682                 BTRFS_NUM_BACKUP_ROOTS;
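        /*
         * the backups form a ring; e.g. with BTRFS_NUM_BACKUP_ROOTS == 4 and
         * next_backup == 0, last_backup works out to (0 + 4 - 1) % 4 == 3.
         */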
1683
1684         /*
1685          * just overwrite the last backup if we're at the same generation;
1686          * this happens only at umount
1687          */
1688         root_backup = info->super_for_commit->super_roots + last_backup;
1689         if (btrfs_backup_tree_root_gen(root_backup) ==
1690             btrfs_header_generation(info->tree_root->node))
1691                 next_backup = last_backup;
1692
1693         root_backup = info->super_for_commit->super_roots + next_backup;
1694
1695         /*
1696          * make sure all of our padding and empty slots get zero filled
1697          * regardless of which ones we use today
1698          */
1699         memset(root_backup, 0, sizeof(*root_backup));
1700
1701         info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1702
1703         btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1704         btrfs_set_backup_tree_root_gen(root_backup,
1705                                btrfs_header_generation(info->tree_root->node));
1706
1707         btrfs_set_backup_tree_root_level(root_backup,
1708                                btrfs_header_level(info->tree_root->node));
1709
1710         btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1711         btrfs_set_backup_chunk_root_gen(root_backup,
1712                                btrfs_header_generation(info->chunk_root->node));
1713         btrfs_set_backup_chunk_root_level(root_backup,
1714                                btrfs_header_level(info->chunk_root->node));
1715
1716         btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
1717         btrfs_set_backup_extent_root_gen(root_backup,
1718                                btrfs_header_generation(info->extent_root->node));
1719         btrfs_set_backup_extent_root_level(root_backup,
1720                                btrfs_header_level(info->extent_root->node));
1721
1722         /*
1723          * we might commit during log recovery, which happens before we set
1724          * the fs_root.  Make sure it is valid before we fill it in.
1725          */
1726         if (info->fs_root && info->fs_root->node) {
1727                 btrfs_set_backup_fs_root(root_backup,
1728                                          info->fs_root->node->start);
1729                 btrfs_set_backup_fs_root_gen(root_backup,
1730                                btrfs_header_generation(info->fs_root->node));
1731                 btrfs_set_backup_fs_root_level(root_backup,
1732                                btrfs_header_level(info->fs_root->node));
1733         }
1734
1735         btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1736         btrfs_set_backup_dev_root_gen(root_backup,
1737                                btrfs_header_generation(info->dev_root->node));
1738         btrfs_set_backup_dev_root_level(root_backup,
1739                                        btrfs_header_level(info->dev_root->node));
1740
1741         btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
1742         btrfs_set_backup_csum_root_gen(root_backup,
1743                                btrfs_header_generation(info->csum_root->node));
1744         btrfs_set_backup_csum_root_level(root_backup,
1745                                btrfs_header_level(info->csum_root->node));
1746
1747         btrfs_set_backup_total_bytes(root_backup,
1748                              btrfs_super_total_bytes(info->super_copy));
1749         btrfs_set_backup_bytes_used(root_backup,
1750                              btrfs_super_bytes_used(info->super_copy));
1751         btrfs_set_backup_num_devices(root_backup,
1752                              btrfs_super_num_devices(info->super_copy));
1753
1754         /*
1755          * if we don't copy this out to the super_copy, it won't get remembered
1756          * for the next commit
1757          */
1758         memcpy(&info->super_copy->super_roots,
1759                &info->super_for_commit->super_roots,
1760                sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1761 }
1762
1763 /*
1764  * this copies info out of the root backup array and back into
1765  * the in-memory super block.  It is meant to help iterate through
1766  * the array, so you send it the number of backups you've already
1767  * tried and the last backup index you used.
1768  *
1769  * this returns -1 when it has tried all the backups
1770  */
1771 static noinline int next_root_backup(struct btrfs_fs_info *info,
1772                                      struct btrfs_super_block *super,
1773                                      int *num_backups_tried, int *backup_index)
1774 {
1775         struct btrfs_root_backup *root_backup;
1776         int newest = *backup_index;
1777
1778         if (*num_backups_tried == 0) {
1779                 u64 gen = btrfs_super_generation(super);
1780
1781                 newest = find_newest_super_backup(info, gen);
1782                 if (newest == -1)
1783                         return -1;
1784
1785                 *backup_index = newest;
1786                 *num_backups_tried = 1;
1787         } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
1788                 /* we've tried all the backups, all done */
1789                 return -1;
1790         } else {
1791                 /* jump to the next oldest backup */
1792                 newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
1793                         BTRFS_NUM_BACKUP_ROOTS;
1794                 *backup_index = newest;
1795                 *num_backups_tried += 1;
1796         }
1797         root_backup = super->super_roots + newest;
1798
1799         btrfs_set_super_generation(super,
1800                                    btrfs_backup_tree_root_gen(root_backup));
1801         btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1802         btrfs_set_super_root_level(super,
1803                                    btrfs_backup_tree_root_level(root_backup));
1804         btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1805
1806         /*
1807          * fixme: the total bytes and num_devices need to match or we should
1808          * require a fsck
1809          */
1810         btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1811         btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1812         return 0;
1813 }
1814
1815 /* helper to cleanup tree roots */
1816 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
1817 {
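        /*
         * drop our references on the cached tree/dev/extent/csum (and
         * optionally chunk) root buffers and clear the pointers, so that a
         * retry from a backup root (see recovery_tree_root in open_ctree)
         * starts from a clean slate.
         */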
1818         free_extent_buffer(info->tree_root->node);
1819         free_extent_buffer(info->tree_root->commit_root);
1820         free_extent_buffer(info->dev_root->node);
1821         free_extent_buffer(info->dev_root->commit_root);
1822         free_extent_buffer(info->extent_root->node);
1823         free_extent_buffer(info->extent_root->commit_root);
1824         free_extent_buffer(info->csum_root->node);
1825         free_extent_buffer(info->csum_root->commit_root);
1826
1827         info->tree_root->node = NULL;
1828         info->tree_root->commit_root = NULL;
1829         info->dev_root->node = NULL;
1830         info->dev_root->commit_root = NULL;
1831         info->extent_root->node = NULL;
1832         info->extent_root->commit_root = NULL;
1833         info->csum_root->node = NULL;
1834         info->csum_root->commit_root = NULL;
1835
1836         if (chunk_root) {
1837                 free_extent_buffer(info->chunk_root->node);
1838                 free_extent_buffer(info->chunk_root->commit_root);
1839                 info->chunk_root->node = NULL;
1840                 info->chunk_root->commit_root = NULL;
1841         }
1842 }
1843
1844
1845 int open_ctree(struct super_block *sb,
1846                struct btrfs_fs_devices *fs_devices,
1847                char *options)
1848 {
1849         u32 sectorsize;
1850         u32 nodesize;
1851         u32 leafsize;
1852         u32 blocksize;
1853         u32 stripesize;
1854         u64 generation;
1855         u64 features;
1856         struct btrfs_key location;
1857         struct buffer_head *bh;
1858         struct btrfs_super_block *disk_super;
1859         struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1860         struct btrfs_root *tree_root;
1861         struct btrfs_root *extent_root;
1862         struct btrfs_root *csum_root;
1863         struct btrfs_root *chunk_root;
1864         struct btrfs_root *dev_root;
1865         struct btrfs_root *log_tree_root;
1866         int ret;
1867         int err = -EINVAL;
1868         int num_backups_tried = 0;
1869         int backup_index = 0;
1870
1871         tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
1872         extent_root = fs_info->extent_root = btrfs_alloc_root(fs_info);
1873         csum_root = fs_info->csum_root = btrfs_alloc_root(fs_info);
1874         chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
1875         dev_root = fs_info->dev_root = btrfs_alloc_root(fs_info);
1876
1877         if (!tree_root || !extent_root || !csum_root ||
1878             !chunk_root || !dev_root) {
1879                 err = -ENOMEM;
1880                 goto fail;
1881         }
1882
1883         ret = init_srcu_struct(&fs_info->subvol_srcu);
1884         if (ret) {
1885                 err = ret;
1886                 goto fail;
1887         }
1888
1889         ret = setup_bdi(fs_info, &fs_info->bdi);
1890         if (ret) {
1891                 err = ret;
1892                 goto fail_srcu;
1893         }
1894
1895         fs_info->btree_inode = new_inode(sb);
1896         if (!fs_info->btree_inode) {
1897                 err = -ENOMEM;
1898                 goto fail_bdi;
1899         }
1900
1901         mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
1902
1903         INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
1904         INIT_LIST_HEAD(&fs_info->trans_list);
1905         INIT_LIST_HEAD(&fs_info->dead_roots);
1906         INIT_LIST_HEAD(&fs_info->delayed_iputs);
1907         INIT_LIST_HEAD(&fs_info->hashers);
1908         INIT_LIST_HEAD(&fs_info->delalloc_inodes);
1909         INIT_LIST_HEAD(&fs_info->ordered_operations);
1910         INIT_LIST_HEAD(&fs_info->caching_block_groups);
1911         spin_lock_init(&fs_info->delalloc_lock);
1912         spin_lock_init(&fs_info->trans_lock);
1913         spin_lock_init(&fs_info->ref_cache_lock);
1914         spin_lock_init(&fs_info->fs_roots_radix_lock);
1915         spin_lock_init(&fs_info->delayed_iput_lock);
1916         spin_lock_init(&fs_info->defrag_inodes_lock);
1917         spin_lock_init(&fs_info->free_chunk_lock);
1918         spin_lock_init(&fs_info->tree_mod_seq_lock);
1919         rwlock_init(&fs_info->tree_mod_log_lock);
1920         mutex_init(&fs_info->reloc_mutex);
1921
1922         init_completion(&fs_info->kobj_unregister);
1923         INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
1924         INIT_LIST_HEAD(&fs_info->space_info);
1925         INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
1926         btrfs_mapping_init(&fs_info->mapping_tree);
1927         btrfs_init_block_rsv(&fs_info->global_block_rsv);
1928         btrfs_init_block_rsv(&fs_info->delalloc_block_rsv);
1929         btrfs_init_block_rsv(&fs_info->trans_block_rsv);
1930         btrfs_init_block_rsv(&fs_info->chunk_block_rsv);
1931         btrfs_init_block_rsv(&fs_info->empty_block_rsv);
1932         btrfs_init_block_rsv(&fs_info->delayed_block_rsv);
1933         atomic_set(&fs_info->nr_async_submits, 0);
1934         atomic_set(&fs_info->async_delalloc_pages, 0);
1935         atomic_set(&fs_info->async_submit_draining, 0);
1936         atomic_set(&fs_info->nr_async_bios, 0);
1937         atomic_set(&fs_info->defrag_running, 0);
1938         atomic_set(&fs_info->tree_mod_seq, 0);
1939         fs_info->sb = sb;
1940         fs_info->max_inline = 8192 * 1024;
1941         fs_info->metadata_ratio = 0;
1942         fs_info->defrag_inodes = RB_ROOT;
1943         fs_info->trans_no_join = 0;
1944         fs_info->free_chunk_space = 0;
1945         fs_info->tree_mod_log = RB_ROOT;
1946
1947         /* readahead state */
1948         INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
1949         spin_lock_init(&fs_info->reada_lock);
1950
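        /* default thread pool size: online CPUs + 2, capped at 8 */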
1951         fs_info->thread_pool_size = min_t(unsigned long,
1952                                           num_online_cpus() + 2, 8);
1953
1954         INIT_LIST_HEAD(&fs_info->ordered_extents);
1955         spin_lock_init(&fs_info->ordered_extent_lock);
1956         fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
1957                                         GFP_NOFS);
1958         if (!fs_info->delayed_root) {
1959                 err = -ENOMEM;
1960                 goto fail_iput;
1961         }
1962         btrfs_init_delayed_root(fs_info->delayed_root);
1963
1964         mutex_init(&fs_info->scrub_lock);
1965         atomic_set(&fs_info->scrubs_running, 0);
1966         atomic_set(&fs_info->scrub_pause_req, 0);
1967         atomic_set(&fs_info->scrubs_paused, 0);
1968         atomic_set(&fs_info->scrub_cancel_req, 0);
1969         init_waitqueue_head(&fs_info->scrub_pause_wait);
1970         init_rwsem(&fs_info->scrub_super_lock);
1971         fs_info->scrub_workers_refcnt = 0;
1972 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
1973         fs_info->check_integrity_print_mask = 0;
1974 #endif
1975
1976         spin_lock_init(&fs_info->balance_lock);
1977         mutex_init(&fs_info->balance_mutex);
1978         atomic_set(&fs_info->balance_running, 0);
1979         atomic_set(&fs_info->balance_pause_req, 0);
1980         atomic_set(&fs_info->balance_cancel_req, 0);
1981         fs_info->balance_ctl = NULL;
1982         init_waitqueue_head(&fs_info->balance_wait_q);
1983
1984         sb->s_blocksize = 4096;
1985         sb->s_blocksize_bits = blksize_bits(4096);
1986         sb->s_bdi = &fs_info->bdi;
1987
1988         fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
1989         set_nlink(fs_info->btree_inode, 1);
1990         /*
1991          * we set the i_size on the btree inode to the max possible offset.
1992          * the real end of the address space is determined by all of
1993          * the devices in the system
1994          */
1995         fs_info->btree_inode->i_size = OFFSET_MAX;
1996         fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
1997         fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
1998
1999         RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
2000         extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
2001                              fs_info->btree_inode->i_mapping);
2002         BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
2003         extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
2004
2005         BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
2006
2007         BTRFS_I(fs_info->btree_inode)->root = tree_root;
2008         memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
2009                sizeof(struct btrfs_key));
2010         set_bit(BTRFS_INODE_DUMMY,
2011                 &BTRFS_I(fs_info->btree_inode)->runtime_flags);
2012         insert_inode_hash(fs_info->btree_inode);
2013
2014         spin_lock_init(&fs_info->block_group_cache_lock);
2015         fs_info->block_group_cache_tree = RB_ROOT;
2016
2017         extent_io_tree_init(&fs_info->freed_extents[0],
2018                              fs_info->btree_inode->i_mapping);
2019         extent_io_tree_init(&fs_info->freed_extents[1],
2020                              fs_info->btree_inode->i_mapping);
2021         fs_info->pinned_extents = &fs_info->freed_extents[0];
2022         fs_info->do_barriers = 1;
2023
2024
2025         mutex_init(&fs_info->ordered_operations_mutex);
2026         mutex_init(&fs_info->tree_log_mutex);
2027         mutex_init(&fs_info->chunk_mutex);
2028         mutex_init(&fs_info->transaction_kthread_mutex);
2029         mutex_init(&fs_info->cleaner_mutex);
2030         mutex_init(&fs_info->volume_mutex);
2031         init_rwsem(&fs_info->extent_commit_sem);
2032         init_rwsem(&fs_info->cleanup_work_sem);
2033         init_rwsem(&fs_info->subvol_sem);
2034
2035         btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2036         btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2037
2038         init_waitqueue_head(&fs_info->transaction_throttle);
2039         init_waitqueue_head(&fs_info->transaction_wait);
2040         init_waitqueue_head(&fs_info->transaction_blocked_wait);
2041         init_waitqueue_head(&fs_info->async_submit_wait);
2042
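        /*
         * set up the tree root with placeholder 4k sizes; the real node, leaf,
         * sector and stripe sizes from the super block are filled in further
         * down once it has been read.
         */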
2043         __setup_root(4096, 4096, 4096, 4096, tree_root,
2044                      fs_info, BTRFS_ROOT_TREE_OBJECTID);
2045
2046         invalidate_bdev(fs_devices->latest_bdev);
2047         bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2048         if (!bh) {
2049                 err = -EINVAL;
2050                 goto fail_alloc;
2051         }
2052
2053         memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2054         memcpy(fs_info->super_for_commit, fs_info->super_copy,
2055                sizeof(*fs_info->super_for_commit));
2056         brelse(bh);
2057
2058         memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
2059
2060         disk_super = fs_info->super_copy;
2061         if (!btrfs_super_root(disk_super))
2062                 goto fail_alloc;
2063
2064         /* check FS state, whether FS is broken. */
2065         fs_info->fs_state |= btrfs_super_flags(disk_super);
2066
2067         ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
2068         if (ret) {
2069                 printk(KERN_ERR "btrfs: superblock contains fatal errors\n");
2070                 err = ret;
2071                 goto fail_alloc;
2072         }
2073
2074         /*
2075          * run through our array of backup supers and setup
2076          * our ring pointer to the oldest one
2077          */
2078         generation = btrfs_super_generation(disk_super);
2079         find_oldest_super_backup(fs_info, generation);
2080
2081         /*
2082          * In the long term, we'll store the compression type in the super
2083          * block, and it'll be used for per file compression control.
2084          */
2085         fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2086
2087         ret = btrfs_parse_options(tree_root, options);
2088         if (ret) {
2089                 err = ret;
2090                 goto fail_alloc;
2091         }
2092
2093         features = btrfs_super_incompat_flags(disk_super) &
2094                 ~BTRFS_FEATURE_INCOMPAT_SUPP;
2095         if (features) {
2096                 printk(KERN_ERR "BTRFS: couldn't mount because of "
2097                        "unsupported optional features (%Lx).\n",
2098                        (unsigned long long)features);
2099                 err = -EINVAL;
2100                 goto fail_alloc;
2101         }
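        /*
         * incompat bits we don't recognize make the fs unmountable; compat_ro
         * bits (checked further down) only prevent mounting it read-write.
         */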
2102
2103         if (btrfs_super_leafsize(disk_super) !=
2104             btrfs_super_nodesize(disk_super)) {
2105                 printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2106                        "blocksizes don't match.  node %d leaf %d\n",
2107                        btrfs_super_nodesize(disk_super),
2108                        btrfs_super_leafsize(disk_super));
2109                 err = -EINVAL;
2110                 goto fail_alloc;
2111         }
2112         if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
2113                 printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2114                        "blocksize (%d) was too large\n",
2115                        btrfs_super_leafsize(disk_super));
2116                 err = -EINVAL;
2117                 goto fail_alloc;
2118         }
2119
2120         features = btrfs_super_incompat_flags(disk_super);
2121         features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2122         if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
2123                 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2124
2125         /*
2126          * flag our filesystem as having big metadata blocks if
2127          * they are bigger than the page size
2128          */
2129         if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
2130                 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2131                         printk(KERN_INFO "btrfs flagging fs with big metadata feature\n");
2132                 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2133         }
2134
2135         nodesize = btrfs_super_nodesize(disk_super);
2136         leafsize = btrfs_super_leafsize(disk_super);
2137         sectorsize = btrfs_super_sectorsize(disk_super);
2138         stripesize = btrfs_super_stripesize(disk_super);
2139
2140         /*
2141          * mixed block groups end up with duplicate but slightly offset
2142          * extent buffers for the same range.  This leads to corruption
2143          */
2144         if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2145             (sectorsize != leafsize)) {
2146                 printk(KERN_WARNING "btrfs: unequal leaf/node/sector sizes "
2147                                 "are not allowed for mixed block groups on %s\n",
2148                                 sb->s_id);
2149                 goto fail_alloc;
2150         }
2151
2152         btrfs_set_super_incompat_flags(disk_super, features);
2153
2154         features = btrfs_super_compat_ro_flags(disk_super) &
2155                 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
2156         if (!(sb->s_flags & MS_RDONLY) && features) {
2157                 printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
2158                        "unsupported option features (%Lx).\n",
2159                        (unsigned long long)features);
2160                 err = -EINVAL;
2161                 goto fail_alloc;
2162         }
2163
2164         btrfs_init_workers(&fs_info->generic_worker,
2165                            "genwork", 1, NULL);
2166
2167         btrfs_init_workers(&fs_info->workers, "worker",
2168                            fs_info->thread_pool_size,
2169                            &fs_info->generic_worker);
2170
2171         btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
2172                            fs_info->thread_pool_size,
2173                            &fs_info->generic_worker);
2174
2175         btrfs_init_workers(&fs_info->submit_workers, "submit",
2176                            min_t(u64, fs_devices->num_devices,
2177                            fs_info->thread_pool_size),
2178                            &fs_info->generic_worker);
2179
2180         btrfs_init_workers(&fs_info->caching_workers, "cache",
2181                            2, &fs_info->generic_worker);
2182
2183         /* a higher idle thresh on the submit workers makes it much more
2184          * likely that bios will be sent down in a sane order to the
2185          * devices
2186          */
2187         fs_info->submit_workers.idle_thresh = 64;
2188
2189         fs_info->workers.idle_thresh = 16;
2190         fs_info->workers.ordered = 1;
2191
2192         fs_info->delalloc_workers.idle_thresh = 2;
2193         fs_info->delalloc_workers.ordered = 1;
2194
2195         btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
2196                            &fs_info->generic_worker);
2197         btrfs_init_workers(&fs_info->endio_workers, "endio",
2198                            fs_info->thread_pool_size,
2199                            &fs_info->generic_worker);
2200         btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
2201                            fs_info->thread_pool_size,
2202                            &fs_info->generic_worker);
2203         btrfs_init_workers(&fs_info->endio_meta_write_workers,
2204                            "endio-meta-write", fs_info->thread_pool_size,
2205                            &fs_info->generic_worker);
2206         btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
2207                            fs_info->thread_pool_size,
2208                            &fs_info->generic_worker);
2209         btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
2210                            1, &fs_info->generic_worker);
2211         btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
2212                            fs_info->thread_pool_size,
2213                            &fs_info->generic_worker);
2214         btrfs_init_workers(&fs_info->readahead_workers, "readahead",
2215                            fs_info->thread_pool_size,
2216                            &fs_info->generic_worker);
2217
2218         /*
2219          * endios are largely parallel and should have a very
2220          * low idle thresh
2221          */
2222         fs_info->endio_workers.idle_thresh = 4;
2223         fs_info->endio_meta_workers.idle_thresh = 4;
2224
2225         fs_info->endio_write_workers.idle_thresh = 2;
2226         fs_info->endio_meta_write_workers.idle_thresh = 2;
2227         fs_info->readahead_workers.idle_thresh = 2;
2228
2229         /*
2230          * btrfs_start_workers can really only fail because of ENOMEM so just
2231          * return -ENOMEM if any of these fail.
2232          */
2233         ret = btrfs_start_workers(&fs_info->workers);
2234         ret |= btrfs_start_workers(&fs_info->generic_worker);
2235         ret |= btrfs_start_workers(&fs_info->submit_workers);
2236         ret |= btrfs_start_workers(&fs_info->delalloc_workers);
2237         ret |= btrfs_start_workers(&fs_info->fixup_workers);
2238         ret |= btrfs_start_workers(&fs_info->endio_workers);
2239         ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
2240         ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
2241         ret |= btrfs_start_workers(&fs_info->endio_write_workers);
2242         ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
2243         ret |= btrfs_start_workers(&fs_info->delayed_workers);
2244         ret |= btrfs_start_workers(&fs_info->caching_workers);
2245         ret |= btrfs_start_workers(&fs_info->readahead_workers);
2246         if (ret) {
2247                 ret = -ENOMEM;
2248                 goto fail_sb_buffer;
2249         }
2250
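        /*
         * scale readahead with the number of devices, but keep at least 4MB
         * worth of pages.
         */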
2251         fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
2252         fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
2253                                     4 * 1024 * 1024 / PAGE_CACHE_SIZE);
2254
2255         tree_root->nodesize = nodesize;
2256         tree_root->leafsize = leafsize;
2257         tree_root->sectorsize = sectorsize;
2258         tree_root->stripesize = stripesize;
2259
2260         sb->s_blocksize = sectorsize;
2261         sb->s_blocksize_bits = blksize_bits(sectorsize);
2262
2263         if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
2264                     sizeof(disk_super->magic))) {
2265                 printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
2266                 goto fail_sb_buffer;
2267         }
2268
2269         if (sectorsize != PAGE_SIZE) {
2270                 printk(KERN_WARNING "btrfs: Incompatible sector size(%lu) "
2271                        "found on %s\n", (unsigned long)sectorsize, sb->s_id);
2272                 goto fail_sb_buffer;
2273         }
2274
2275         mutex_lock(&fs_info->chunk_mutex);
2276         ret = btrfs_read_sys_array(tree_root);
2277         mutex_unlock(&fs_info->chunk_mutex);
2278         if (ret) {
2279                 printk(KERN_WARNING "btrfs: failed to read the system "
2280                        "array on %s\n", sb->s_id);
2281                 goto fail_sb_buffer;
2282         }
2283
2284         blocksize = btrfs_level_size(tree_root,
2285                                      btrfs_super_chunk_root_level(disk_super));
2286         generation = btrfs_super_chunk_root_generation(disk_super);
2287
2288         __setup_root(nodesize, leafsize, sectorsize, stripesize,
2289                      chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2290
2291         chunk_root->node = read_tree_block(chunk_root,
2292                                            btrfs_super_chunk_root(disk_super),
2293                                            blocksize, generation);
2294         BUG_ON(!chunk_root->node); /* -ENOMEM */
2295         if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
2296                 printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
2297                        sb->s_id);
2298                 goto fail_tree_roots;
2299         }
2300         btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2301         chunk_root->commit_root = btrfs_root_node(chunk_root);
2302
2303         read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2304            (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
2305            BTRFS_UUID_SIZE);
2306
2307         ret = btrfs_read_chunk_tree(chunk_root);
2308         if (ret) {
2309                 printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
2310                        sb->s_id);
2311                 goto fail_tree_roots;
2312         }
2313
2314         btrfs_close_extra_devices(fs_devices);
2315
2316         if (!fs_devices->latest_bdev) {
2317                 printk(KERN_CRIT "btrfs: failed to read devices on %s\n",
2318                        sb->s_id);
2319                 goto fail_tree_roots;
2320         }
2321
2322 retry_root_backup:
2323         blocksize = btrfs_level_size(tree_root,
2324                                      btrfs_super_root_level(disk_super));
2325         generation = btrfs_super_generation(disk_super);
2326
2327         tree_root->node = read_tree_block(tree_root,
2328                                           btrfs_super_root(disk_super),
2329                                           blocksize, generation);
2330         if (!tree_root->node ||
2331             !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
2332                 printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
2333                        sb->s_id);
2334
2335                 goto recovery_tree_root;
2336         }
2337
2338         btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2339         tree_root->commit_root = btrfs_root_node(tree_root);
2340
2341         ret = find_and_setup_root(tree_root, fs_info,
2342                                   BTRFS_EXTENT_TREE_OBJECTID, extent_root);
2343         if (ret)
2344                 goto recovery_tree_root;
2345         extent_root->track_dirty = 1;
2346
2347         ret = find_and_setup_root(tree_root, fs_info,
2348                                   BTRFS_DEV_TREE_OBJECTID, dev_root);
2349         if (ret)
2350                 goto recovery_tree_root;
2351         dev_root->track_dirty = 1;
2352
2353         ret = find_and_setup_root(tree_root, fs_info,
2354                                   BTRFS_CSUM_TREE_OBJECTID, csum_root);
2355         if (ret)
2356                 goto recovery_tree_root;
2357
2358         csum_root->track_dirty = 1;
2359
2360         fs_info->generation = generation;
2361         fs_info->last_trans_committed = generation;
2362
2363         ret = btrfs_init_dev_stats(fs_info);
2364         if (ret) {
2365                 printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
2366                        ret);
2367                 goto fail_block_groups;
2368         }
2369
2370         ret = btrfs_init_space_info(fs_info);
2371         if (ret) {
2372                 printk(KERN_ERR "Failed to initialize space info: %d\n", ret);
2373                 goto fail_block_groups;
2374         }
2375
2376         ret = btrfs_read_block_groups(extent_root);
2377         if (ret) {
2378                 printk(KERN_ERR "Failed to read block groups: %d\n", ret);
2379                 goto fail_block_groups;
2380         }
2381
2382         fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
2383                                                "btrfs-cleaner");
2384         if (IS_ERR(fs_info->cleaner_kthread))
2385                 goto fail_block_groups;
2386
2387         fs_info->transaction_kthread = kthread_run(transaction_kthread,
2388                                                    tree_root,
2389                                                    "btrfs-transaction");
2390         if (IS_ERR(fs_info->transaction_kthread))
2391                 goto fail_cleaner;
2392
2393         if (!btrfs_test_opt(tree_root, SSD) &&
2394             !btrfs_test_opt(tree_root, NOSSD) &&
2395             !fs_info->fs_devices->rotating) {
2396                 printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
2397                        "mode\n");
2398                 btrfs_set_opt(fs_info->mount_opt, SSD);
2399         }
2400
2401 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2402         if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
2403                 ret = btrfsic_mount(tree_root, fs_devices,
2404                                     btrfs_test_opt(tree_root,
2405                                         CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
2406                                     1 : 0,
2407                                     fs_info->check_integrity_print_mask);
2408                 if (ret)
2409                         printk(KERN_WARNING "btrfs: failed to initialize"
2410                                " integrity check module %s\n", sb->s_id);
2411         }
2412 #endif
2413
2414         /* do not make disk changes in broken FS */
2415         if (btrfs_super_log_root(disk_super) != 0 &&
2416             !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) {
2417                 u64 bytenr = btrfs_super_log_root(disk_super);
2418
2419                 if (fs_devices->rw_devices == 0) {
2420                         printk(KERN_WARNING "Btrfs log replay required "
2421                                "on RO media\n");
2422                         err = -EIO;
2423                         goto fail_trans_kthread;
2424                 }
2425                 blocksize =
2426                      btrfs_level_size(tree_root,
2427                                       btrfs_super_log_root_level(disk_super));
2428
2429                 log_tree_root = btrfs_alloc_root(fs_info);
2430                 if (!log_tree_root) {
2431                         err = -ENOMEM;
2432                         goto fail_trans_kthread;
2433                 }
2434
2435                 __setup_root(nodesize, leafsize, sectorsize, stripesize,
2436                              log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2437
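                /*
                 * the log root is expected to carry the generation of the
                 * transaction following the last committed one, hence
                 * generation + 1 below.
                 */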
2438                 log_tree_root->node = read_tree_block(tree_root, bytenr,
2439                                                       blocksize,
2440                                                       generation + 1);
2441                 /* returns with log_tree_root freed on success */
2442                 ret = btrfs_recover_log_trees(log_tree_root);
2443                 if (ret) {
2444                         btrfs_error(tree_root->fs_info, ret,
2445                                     "Failed to recover log tree");
2446                         free_extent_buffer(log_tree_root->node);
2447                         kfree(log_tree_root);
2448                         goto fail_trans_kthread;
2449                 }
2450
2451                 if (sb->s_flags & MS_RDONLY) {
2452                         ret = btrfs_commit_super(tree_root);
2453                         if (ret)
2454                                 goto fail_trans_kthread;
2455                 }
2456         }
2457
2458         ret = btrfs_find_orphan_roots(tree_root);
2459         if (ret)
2460                 goto fail_trans_kthread;
2461
2462         if (!(sb->s_flags & MS_RDONLY)) {
2463                 ret = btrfs_cleanup_fs_roots(fs_info);
2464                 if (ret)
2465                         goto fail_trans_kthread;
2466
2467                 ret = btrfs_recover_relocation(tree_root);
2468                 if (ret < 0) {
2469                         printk(KERN_WARNING
2470                                "btrfs: failed to recover relocation\n");
2471                         err = -EINVAL;
2472                         goto fail_trans_kthread;
2473                 }
2474         }
2475
2476         location.objectid = BTRFS_FS_TREE_OBJECTID;
2477         location.type = BTRFS_ROOT_ITEM_KEY;
2478         location.offset = (u64)-1;
2479
2480         fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
2481         if (!fs_info->fs_root)
2482                 goto fail_trans_kthread;
2483         if (IS_ERR(fs_info->fs_root)) {
2484                 err = PTR_ERR(fs_info->fs_root);
2485                 goto fail_trans_kthread;
2486         }
2487
2488         if (!(sb->s_flags & MS_RDONLY)) {
2489                 down_read(&fs_info->cleanup_work_sem);
2490                 err = btrfs_orphan_cleanup(fs_info->fs_root);
2491                 if (!err)
2492                         err = btrfs_orphan_cleanup(fs_info->tree_root);
2493                 up_read(&fs_info->cleanup_work_sem);
2494
2495                 if (!err)
2496                         err = btrfs_recover_balance(fs_info->tree_root);
2497
2498                 if (err) {
2499                         close_ctree(tree_root);
2500                         return err;
2501                 }
2502         }
2503
2504         return 0;
2505
2506 fail_trans_kthread:
2507         kthread_stop(fs_info->transaction_kthread);
2508 fail_cleaner:
2509         kthread_stop(fs_info->cleaner_kthread);
2510
2511         /*
2512          * make sure we're done with the btree inode before we stop our
2513          * kthreads
2514          */
2515         filemap_write_and_wait(fs_info->btree_inode->i_mapping);
2516         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2517
2518 fail_block_groups:
2519         btrfs_free_block_groups(fs_info);
2520
2521 fail_tree_roots:
2522         free_root_pointers(fs_info, 1);
2523
2524 fail_sb_buffer:
2525         btrfs_stop_workers(&fs_info->generic_worker);
2526         btrfs_stop_workers(&fs_info->readahead_workers);
2527         btrfs_stop_workers(&fs_info->fixup_workers);
2528         btrfs_stop_workers(&fs_info->delalloc_workers);
2529         btrfs_stop_workers(&fs_info->workers);
2530         btrfs_stop_workers(&fs_info->endio_workers);
2531         btrfs_stop_workers(&fs_info->endio_meta_workers);
2532         btrfs_stop_workers(&fs_info->endio_meta_write_workers);
2533         btrfs_stop_workers(&fs_info->endio_write_workers);
2534         btrfs_stop_workers(&fs_info->endio_freespace_worker);
2535         btrfs_stop_workers(&fs_info->submit_workers);
2536         btrfs_stop_workers(&fs_info->delayed_workers);
2537         btrfs_stop_workers(&fs_info->caching_workers);
2538 fail_alloc:
2539 fail_iput:
2540         btrfs_mapping_tree_free(&fs_info->mapping_tree);
2541
2542         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2543         iput(fs_info->btree_inode);
2544 fail_bdi:
2545         bdi_destroy(&fs_info->bdi);
2546 fail_srcu:
2547         cleanup_srcu_struct(&fs_info->subvol_srcu);
2548 fail:
2549         btrfs_close_devices(fs_info->fs_devices);
2550         return err;
2551
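/*
 * reading the tree root (or one of the roots hanging off it) failed.  If the
 * recovery mount option is set, fall back to the next-newest backup root and
 * retry, with the log zeroed out and the free space cache invalidated since
 * neither can be trusted against an older root.
 */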
2552 recovery_tree_root:
2553         if (!btrfs_test_opt(tree_root, RECOVERY))
2554                 goto fail_tree_roots;
2555
2556         free_root_pointers(fs_info, 0);
2557
2558         /* don't use the log in recovery mode, it won't be valid */
2559         btrfs_set_super_log_root(disk_super, 0);
2560
2561         /* we can't trust the free space cache either */
2562         btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
2563
2564         ret = next_root_backup(fs_info, fs_info->super_copy,
2565                                &num_backups_tried, &backup_index);
2566         if (ret == -1)
2567                 goto fail_block_groups;
2568         goto retry_root_backup;
2569 }
2570
2571 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
2572 {
2573         if (uptodate) {
2574                 set_buffer_uptodate(bh);
2575         } else {
2576                 struct btrfs_device *device = (struct btrfs_device *)
2577                         bh->b_private;
2578
2579                 printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to "
2580                                           "I/O error on %s\n",
2581                                           rcu_str_deref(device->name));
2582                 /* note, we don't set_buffer_write_io_error because we have
2583                  * our own ways of dealing with the IO errors
2584                  */
2585                 clear_buffer_uptodate(bh);
2586                 btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
2587         }
2588         unlock_buffer(bh);
2589         put_bh(bh);
2590 }
2591
2592 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
2593 {
2594         struct buffer_head *bh;
2595         struct buffer_head *latest = NULL;
2596         struct btrfs_super_block *super;
2597         int i;
2598         u64 transid = 0;
2599         u64 bytenr;
2600
2601         /* we would like to check all the supers, but that would make
2602          * a btrfs mount succeed after a mkfs from a different FS.
2603          * So, we need to add a special mount option to scan for
2604          * later supers, using BTRFS_SUPER_MIRROR_MAX instead
2605          */
2606         for (i = 0; i < 1; i++) {
2607                 bytenr = btrfs_sb_offset(i);
2608                 if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
2609                         break;
2610                 bh = __bread(bdev, bytenr / 4096, 4096);
2611                 if (!bh)
2612                         continue;
2613
2614                 super = (struct btrfs_super_block *)bh->b_data;
2615                 if (btrfs_super_bytenr(super) != bytenr ||
2616                     strncmp((char *)(&super->magic), BTRFS_MAGIC,
2617                             sizeof(super->magic))) {
2618                         brelse(bh);
2619                         continue;
2620                 }
2621
2622                 if (!latest || btrfs_super_generation(super) > transid) {
2623                         brelse(latest);
2624                         latest = bh;
2625                         transid = btrfs_super_generation(super);
2626                 } else {
2627                         brelse(bh);
2628                 }
2629         }
2630         return latest;
2631 }
2632
2633 /*
2634  * this should be called twice, once with wait == 0 and
2635  * once with wait == 1.  When wait == 0 is done, all the buffer heads
2636  * we write are pinned.
2637  *
2638  * They are released when wait == 1 is done.
2639  * max_mirrors must be the same for both runs, and it indicates how
2640  * many supers on this one device should be written.
2641  *
2642  * max_mirrors == 0 means to write them all.
2643  */
2644 static int write_dev_supers(struct btrfs_device *device,
2645                             struct btrfs_super_block *sb,
2646                             int do_barriers, int wait, int max_mirrors)
2647 {
2648         struct buffer_head *bh;
2649         int i;
2650         int ret;
2651         int errors = 0;
2652         u32 crc;
2653         u64 bytenr;
2654
2655         if (max_mirrors == 0)
2656                 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
2657
2658         for (i = 0; i < max_mirrors; i++) {
2659                 bytenr = btrfs_sb_offset(i);
2660                 if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
2661                         break;
2662
2663                 if (wait) {
2664                         bh = __find_get_block(device->bdev, bytenr / 4096,
2665                                               BTRFS_SUPER_INFO_SIZE);
2666                         BUG_ON(!bh);
2667                         wait_on_buffer(bh);
2668                         if (!buffer_uptodate(bh))
2669                                 errors++;
2670
2671                         /* drop our reference */
2672                         brelse(bh);
2673
2674                         /* drop the reference from the wait == 0 run */
2675                         brelse(bh);
2676                         continue;
2677                 } else {
2678                         btrfs_set_super_bytenr(sb, bytenr);
2679
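                        /*
                         * checksum everything after the csum field and store
                         * the result at the start of the super block
                         * (sb->csum).
                         */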
2680                         crc = ~(u32)0;
2681                         crc = btrfs_csum_data(NULL, (char *)sb +
2682                                               BTRFS_CSUM_SIZE, crc,
2683                                               BTRFS_SUPER_INFO_SIZE -
2684                                               BTRFS_CSUM_SIZE);
2685                         btrfs_csum_final(crc, sb->csum);
2686
2687                         /*
2688                          * one reference for us, and we leave it for the
2689                          * caller
2690                          */
2691                         bh = __getblk(device->bdev, bytenr / 4096,
2692                                       BTRFS_SUPER_INFO_SIZE);
2693                         memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
2694
2695                         /* one reference for submit_bh */
2696                         get_bh(bh);
2697
2698                         set_buffer_uptodate(bh);
2699                         lock_buffer(bh);
2700                         bh->b_end_io = btrfs_end_buffer_write_sync;
2701                         bh->b_private = device;
2702                 }
2703
2704                 /*
2705                  * we fua the first super.  The others we allow
2706                  * to go down lazily.
2707                  */
2708                 ret = btrfsic_submit_bh(WRITE_FUA, bh);
2709                 if (ret)
2710                         errors++;
2711         }
2712         return errors < i ? 0 : -1;
2713 }
2714
2715 /*
2716  * endio for write_dev_flush; this will wake anyone waiting
2717  * for the barrier when it is done
2718  */
2719 static void btrfs_end_empty_barrier(struct bio *bio, int err)
2720 {
2721         if (err) {
2722                 if (err == -EOPNOTSUPP)
2723                         set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2724                 clear_bit(BIO_UPTODATE, &bio->bi_flags);
2725         }
2726         if (bio->bi_private)
2727                 complete(bio->bi_private);
2728         bio_put(bio);
2729 }
2730
2731 /*
2732  * trigger flushes for one of the devices.  If you pass wait == 0, the flushes
2733  * are sent down.  With wait == 1, it waits for the previous flush.
2734  *
2735  * any device where the flush fails with eopnotsupp is flagged as not-barrier
2736  * capable
2737  */
2738 static int write_dev_flush(struct btrfs_device *device, int wait)
2739 {
2740         struct bio *bio;
2741         int ret = 0;
2742
2743         if (device->nobarriers)
2744                 return 0;
2745
2746         if (wait) {
2747                 bio = device->flush_bio;
2748                 if (!bio)
2749                         return 0;
2750
2751                 wait_for_completion(&device->flush_wait);
2752
2753                 if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
2754                         printk_in_rcu("btrfs: disabling barriers on dev %s\n",
2755                                       rcu_str_deref(device->name));
2756                         device->nobarriers = 1;
2757                 }
2758                 if (!bio_flagged(bio, BIO_UPTODATE)) {
2759                         ret = -EIO;
2760                         if (!bio_flagged(bio, BIO_EOPNOTSUPP))
2761                                 btrfs_dev_stat_inc_and_print(device,
2762                                         BTRFS_DEV_STAT_FLUSH_ERRS);
2763                 }
2764
2765                 /* drop the reference from the wait == 0 run */
2766                 bio_put(bio);
2767                 device->flush_bio = NULL;
2768
2769                 return ret;
2770         }
2771
2772         /*
2773          * one reference for us, and we leave it for the
2774          * caller
2775          */
2776         device->flush_bio = NULL;
2777         bio = bio_alloc(GFP_NOFS, 0);
2778         if (!bio)
2779                 return -ENOMEM;
2780
2781         bio->bi_end_io = btrfs_end_empty_barrier;
2782         bio->bi_bdev = device->bdev;
2783         init_completion(&device->flush_wait);
2784         bio->bi_private = &device->flush_wait;
2785         device->flush_bio = bio;
2786
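        /*
         * The extra bio_get() keeps the bio alive after the endio drops its
         * reference, so the later wait == 1 call can still inspect the bio
         * flags before putting the final reference.
         */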
2787         bio_get(bio);
2788         btrfsic_submit_bio(WRITE_FLUSH, bio);
2789
2790         return 0;
2791 }
2792
2793 /*
2794  * send an empty flush down to each device in parallel,
2795  * then wait for them
2796  */
2797 static int barrier_all_devices(struct btrfs_fs_info *info)
2798 {
2799         struct list_head *head;
2800         struct btrfs_device *dev;
2801         int errors = 0;
2802         int ret;
2803
2804         /* send down all the barriers */
2805         head = &info->fs_devices->devices;
2806         list_for_each_entry_rcu(dev, head, dev_list) {
2807                 if (!dev->bdev) {
2808                         errors++;
2809                         continue;
2810                 }
2811                 if (!dev->in_fs_metadata || !dev->writeable)
2812                         continue;
2813
2814                 ret = write_dev_flush(dev, 0);
2815                 if (ret)
2816                         errors++;
2817         }
2818
2819         /* wait for all the barriers */
2820         list_for_each_entry_rcu(dev, head, dev_list) {
2821                 if (!dev->bdev) {
2822                         errors++;
2823                         continue;
2824                 }
2825                 if (!dev->in_fs_metadata || !dev->writeable)
2826                         continue;
2827
2828                 ret = write_dev_flush(dev, 1);
2829                 if (ret)
2830                         errors++;
2831         }
2832         if (errors)
2833                 return -EIO;
2834         return 0;
2835 }
2836
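/*
 * Write the super block to every writeable device that is part of the fs
 * metadata.  Barriers are sent first unless mounted with nobarrier, then the
 * copies are submitted in one pass and waited on in a second pass.  Up to
 * max_errors (number of devices - 1) failed devices are tolerated.
 */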
2837 int write_all_supers(struct btrfs_root *root, int max_mirrors)
2838 {
2839         struct list_head *head;
2840         struct btrfs_device *dev;
2841         struct btrfs_super_block *sb;
2842         struct btrfs_dev_item *dev_item;
2843         int ret;
2844         int do_barriers;
2845         int max_errors;
2846         int total_errors = 0;
2847         u64 flags;
2848
2849         max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
2850         do_barriers = !btrfs_test_opt(root, NOBARRIER);
2851         backup_super_roots(root->fs_info);
2852
2853         sb = root->fs_info->super_for_commit;
2854         dev_item = &sb->dev_item;
2855
2856         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2857         head = &root->fs_info->fs_devices->devices;
2858
2859         if (do_barriers)
2860                 barrier_all_devices(root->fs_info);
2861
2862         list_for_each_entry_rcu(dev, head, dev_list) {
2863                 if (!dev->bdev) {
2864                         total_errors++;
2865                         continue;
2866                 }
2867                 if (!dev->in_fs_metadata || !dev->writeable)
2868                         continue;
2869
2870                 btrfs_set_stack_device_generation(dev_item, 0);
2871                 btrfs_set_stack_device_type(dev_item, dev->type);
2872                 btrfs_set_stack_device_id(dev_item, dev->devid);
2873                 btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
2874                 btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
2875                 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
2876                 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
2877                 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
2878                 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
2879                 memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
2880
2881                 flags = btrfs_super_flags(sb);
2882                 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
2883
2884                 ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
2885                 if (ret)
2886                         total_errors++;
2887         }
2888         if (total_errors > max_errors) {
2889                 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2890                        total_errors);
2891
2892                 /* This shouldn't happen. FUA is masked off if unsupported */
2893                 BUG();
2894         }
2895
2896         total_errors = 0;
2897         list_for_each_entry_rcu(dev, head, dev_list) {
2898                 if (!dev->bdev)
2899                         continue;
2900                 if (!dev->in_fs_metadata || !dev->writeable)
2901                         continue;
2902
2903                 ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
2904                 if (ret)
2905                         total_errors++;
2906         }
2907         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2908         if (total_errors > max_errors) {
2909                 btrfs_error(root->fs_info, -EIO,
2910                             "%d errors while writing supers", total_errors);
2911                 return -EIO;
2912         }
2913         return 0;
2914 }
2915
2916 int write_ctree_super(struct btrfs_trans_handle *trans,
2917                       struct btrfs_root *root, int max_mirrors)
2918 {
2919         int ret;
2920
2921         ret = write_all_supers(root, max_mirrors);
2922         return ret;
2923 }
2924
2925 void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2926 {
2927         spin_lock(&fs_info->fs_roots_radix_lock);
2928         radix_tree_delete(&fs_info->fs_roots_radix,
2929                           (unsigned long)root->root_key.objectid);
2930         spin_unlock(&fs_info->fs_roots_radix_lock);
2931
2932         if (btrfs_root_refs(&root->root_item) == 0)
2933                 synchronize_srcu(&fs_info->subvol_srcu);
2934
2935         __btrfs_remove_free_space_cache(root->free_ino_pinned);
2936         __btrfs_remove_free_space_cache(root->free_ino_ctl);
2937         free_fs_root(root);
2938 }
2939
2940 static void free_fs_root(struct btrfs_root *root)
2941 {
2942         iput(root->cache_inode);
2943         WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
2944         if (root->anon_dev)
2945                 free_anon_bdev(root->anon_dev);
2946         free_extent_buffer(root->node);
2947         free_extent_buffer(root->commit_root);
2948         kfree(root->free_ino_ctl);
2949         kfree(root->free_ino_pinned);
2950         kfree(root->name);
2951         kfree(root);
2952 }
2953
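/*
 * Free every root we still track: first the roots queued on dead_roots,
 * then whatever remains in the fs_roots radix tree.
 */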
2954 static void del_fs_roots(struct btrfs_fs_info *fs_info)
2955 {
2956         int ret;
2957         struct btrfs_root *gang[8];
2958         int i;
2959
2960         while (!list_empty(&fs_info->dead_roots)) {
2961                 gang[0] = list_entry(fs_info->dead_roots.next,
2962                                      struct btrfs_root, root_list);
2963                 list_del(&gang[0]->root_list);
2964
2965                 if (gang[0]->in_radix) {
2966                         btrfs_free_fs_root(fs_info, gang[0]);
2967                 } else {
2968                         free_extent_buffer(gang[0]->node);
2969                         free_extent_buffer(gang[0]->commit_root);
2970                         kfree(gang[0]);
2971                 }
2972         }
2973
2974         while (1) {
2975                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2976                                              (void **)gang, 0,
2977                                              ARRAY_SIZE(gang));
2978                 if (!ret)
2979                         break;
2980                 for (i = 0; i < ret; i++)
2981                         btrfs_free_fs_root(fs_info, gang[i]);
2982         }
2983 }
2984
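/*
 * Walk all fs roots via gang lookups in the radix tree and run orphan
 * cleanup on each of them.
 */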
2985 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
2986 {
2987         u64 root_objectid = 0;
2988         struct btrfs_root *gang[8];
2989         int i;
2990         int ret;
2991
2992         while (1) {
2993                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2994                                              (void **)gang, root_objectid,
2995                                              ARRAY_SIZE(gang));
2996                 if (!ret)
2997                         break;
2998
2999                 root_objectid = gang[ret - 1]->root_key.objectid + 1;
3000                 for (i = 0; i < ret; i++) {
3001                         int err;
3002
3003                         root_objectid = gang[i]->root_key.objectid;
3004                         err = btrfs_orphan_cleanup(gang[i]);
3005                         if (err)
3006                                 return err;
3007                 }
3008                 root_objectid++;
3009         }
3010         return 0;
3011 }
3012
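/*
 * Commit the running transaction (twice, so the old snapshot is dropped) and
 * write out the super blocks; close_ctree() uses this on a clean unmount.
 */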
3013 int btrfs_commit_super(struct btrfs_root *root)
3014 {
3015         struct btrfs_trans_handle *trans;
3016         int ret;
3017
3018         mutex_lock(&root->fs_info->cleaner_mutex);
3019         btrfs_run_delayed_iputs(root);
3020         btrfs_clean_old_snapshots(root);
3021         mutex_unlock(&root->fs_info->cleaner_mutex);
3022
3023         /* wait until ongoing cleanup work is done */
3024         down_write(&root->fs_info->cleanup_work_sem);
3025         up_write(&root->fs_info->cleanup_work_sem);
3026
3027         trans = btrfs_join_transaction(root);
3028         if (IS_ERR(trans))
3029                 return PTR_ERR(trans);
3030         ret = btrfs_commit_transaction(trans, root);
3031         if (ret)
3032                 return ret;
3033         /* run commit again to drop the original snapshot */
3034         trans = btrfs_join_transaction(root);
3035         if (IS_ERR(trans))
3036                 return PTR_ERR(trans);
3037         ret = btrfs_commit_transaction(trans, root);
3038         if (ret)
3039                 return ret;
3040         ret = btrfs_write_and_wait_transaction(NULL, root);
3041         if (ret) {
3042                 btrfs_error(root->fs_info, ret,
3043                             "Failed to sync btree inode to disk.");
3044                 return ret;
3045         }
3046
3047         ret = write_ctree_super(NULL, root, 0);
3048         return ret;
3049 }
3050
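/*
 * Tear the filesystem down at unmount time: pause balance, cancel scrub,
 * stop the background kthreads, commit (or error-commit) the last
 * transaction, then free block groups, roots, workers and devices.
 */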
3051 int close_ctree(struct btrfs_root *root)
3052 {
3053         struct btrfs_fs_info *fs_info = root->fs_info;
3054         int ret;
3055
3056         fs_info->closing = 1;
3057         smp_mb();
3058
3059         /* pause restriper - we want to resume on mount */
3060         btrfs_pause_balance(root->fs_info);
3061
3062         btrfs_scrub_cancel(root);
3063
3064         /* wait for any defraggers to finish */
3065         wait_event(fs_info->transaction_wait,
3066                    (atomic_read(&fs_info->defrag_running) == 0));
3067
3068         /* clear out the rbtree of defraggable inodes */
3069         btrfs_run_defrag_inodes(fs_info);
3070
3071         /*
3072          * There are two situations where btrfs has hit an error and
3073          * flipped read-only:
3074          *
3075          * 1. btrfs flipped read-only somewhere else before
3076          * btrfs_commit_super; sb->s_flags already has MS_RDONLY set,
3077          * so btrfs skips writing the super block here to keep the
3078          * ERROR state on disk.
3079          *
3080          * 2. btrfs flips read-only inside btrfs_commit_super itself;
3081          * it cannot write the super block there, so since fs_state has
3082          * BTRFS_SUPER_FLAG_ERROR set, btrfs cleans up all FS resources
3083          * first and then writes the super block.
3083          */
3084         if (!(fs_info->sb->s_flags & MS_RDONLY)) {
3085                 ret = btrfs_commit_super(root);
3086                 if (ret)
3087                         printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
3088         }
3089
3090         if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
3091                 ret = btrfs_error_commit_super(root);
3092                 if (ret)
3093                         printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
3094         }
3095
3096         btrfs_put_block_group_cache(fs_info);
3097
3098         kthread_stop(fs_info->transaction_kthread);
3099         kthread_stop(fs_info->cleaner_kthread);
3100
3101         fs_info->closing = 2;
3102         smp_mb();
3103
3104         if (fs_info->delalloc_bytes) {
3105                 printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
3106                        (unsigned long long)fs_info->delalloc_bytes);
3107         }
3108         if (fs_info->total_ref_cache_size) {
3109                 printk(KERN_INFO "btrfs: at umount reference cache size %llu\n",
3110                        (unsigned long long)fs_info->total_ref_cache_size);
3111         }
3112
3113         free_extent_buffer(fs_info->extent_root->node);
3114         free_extent_buffer(fs_info->extent_root->commit_root);
3115         free_extent_buffer(fs_info->tree_root->node);
3116         free_extent_buffer(fs_info->tree_root->commit_root);
3117         free_extent_buffer(fs_info->chunk_root->node);
3118         free_extent_buffer(fs_info->chunk_root->commit_root);
3119         free_extent_buffer(fs_info->dev_root->node);
3120         free_extent_buffer(fs_info->dev_root->commit_root);
3121         free_extent_buffer(fs_info->csum_root->node);
3122         free_extent_buffer(fs_info->csum_root->commit_root);
3123
3124         btrfs_free_block_groups(fs_info);
3125
3126         del_fs_roots(fs_info);
3127
3128         iput(fs_info->btree_inode);
3129
3130         btrfs_stop_workers(&fs_info->generic_worker);
3131         btrfs_stop_workers(&fs_info->fixup_workers);
3132         btrfs_stop_workers(&fs_info->delalloc_workers);
3133         btrfs_stop_workers(&fs_info->workers);
3134         btrfs_stop_workers(&fs_info->endio_workers);
3135         btrfs_stop_workers(&fs_info->endio_meta_workers);
3136         btrfs_stop_workers(&fs_info->endio_meta_write_workers);
3137         btrfs_stop_workers(&fs_info->endio_write_workers);
3138         btrfs_stop_workers(&fs_info->endio_freespace_worker);
3139         btrfs_stop_workers(&fs_info->submit_workers);
3140         btrfs_stop_workers(&fs_info->delayed_workers);
3141         btrfs_stop_workers(&fs_info->caching_workers);
3142         btrfs_stop_workers(&fs_info->readahead_workers);
3143
3144 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3145         if (btrfs_test_opt(root, CHECK_INTEGRITY))
3146                 btrfsic_unmount(root, fs_info->fs_devices);
3147 #endif
3148
3149         btrfs_close_devices(fs_info->fs_devices);
3150         btrfs_mapping_tree_free(&fs_info->mapping_tree);
3151
3152         bdi_destroy(&fs_info->bdi);
3153         cleanup_srcu_struct(&fs_info->subvol_srcu);
3154
3155         return 0;
3156 }
3157
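/*
 * Returns nonzero if @buf is uptodate and was written by the transaction
 * @parent_transid, zero otherwise.  With @atomic set, the transid check may
 * bail out with -EAGAIN rather than block.
 */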
3158 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
3159                           int atomic)
3160 {
3161         int ret;
3162         struct inode *btree_inode = buf->pages[0]->mapping->host;
3163
3164         ret = extent_buffer_uptodate(buf);
3165         if (!ret)
3166                 return ret;
3167
3168         ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
3169                                     parent_transid, atomic);
3170         if (ret == -EAGAIN)
3171                 return ret;
3172         return !ret;
3173 }
3174
3175 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
3176 {
3177         return set_extent_buffer_uptodate(buf);
3178 }
3179
3180 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
3181 {
3182         struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3183         u64 transid = btrfs_header_generation(buf);
3184         int was_dirty;
3185
3186         btrfs_assert_tree_locked(buf);
3187         if (transid != root->fs_info->generation) {
3188                 printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
3189                        "found %llu running %llu\n",
3190                         (unsigned long long)buf->start,
3191                         (unsigned long long)transid,
3192                         (unsigned long long)root->fs_info->generation);
3193                 WARN_ON(1);
3194         }
3195         was_dirty = set_extent_buffer_dirty(buf);
3196         if (!was_dirty) {
3197                 spin_lock(&root->fs_info->delalloc_lock);
3198                 root->fs_info->dirty_metadata_bytes += buf->len;
3199                 spin_unlock(&root->fs_info->delalloc_lock);
3200         }
3201 }
3202
3203 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
3204 {
3205         /*
3206          * looks as though older kernels can get into trouble with
3207          * this code; they end up stuck in balance_dirty_pages forever
3208          */
3209         u64 num_dirty;
3210         unsigned long thresh = 32 * 1024 * 1024;
3211
3212         if (current->flags & PF_MEMALLOC)
3213                 return;
3214
3215         btrfs_balance_delayed_items(root);
3216
3217         num_dirty = root->fs_info->dirty_metadata_bytes;
3218
3219         if (num_dirty > thresh) {
3220                 balance_dirty_pages_ratelimited_nr(
3221                                    root->fs_info->btree_inode->i_mapping, 1);
3222         }
3223         return;
3224 }
3225
3226 void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
3227 {
3228         /*
3229          * looks as though older kernels can get into trouble with
3230          * this code; they end up stuck in balance_dirty_pages forever
3231          */
3232         u64 num_dirty;
3233         unsigned long thresh = 32 * 1024 * 1024;
3234
3235         if (current->flags & PF_MEMALLOC)
3236                 return;
3237
3238         num_dirty = root->fs_info->dirty_metadata_bytes;
3239
3240         if (num_dirty > thresh) {
3241                 balance_dirty_pages_ratelimited_nr(
3242                                    root->fs_info->btree_inode->i_mapping, 1);
3243         }
3244         return;
3245 }
3246
3247 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
3248 {
3249         struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3250         return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
3251 }
3252
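/*
 * write_cache_pages lock hook for the btree inode (installed in
 * btree_extent_io_ops below).  Takes the tree write lock on the extent
 * buffer backing @page, marks its header WRITTEN and drops its bytes from
 * the dirty metadata accounting, then locks the page for writeback.
 */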
3253 static int btree_lock_page_hook(struct page *page, void *data,
3254                                 void (*flush_fn)(void *))
3255 {
3256         struct inode *inode = page->mapping->host;
3257         struct btrfs_root *root = BTRFS_I(inode)->root;
3258         struct extent_buffer *eb;
3259
3260         /*
3261          * We culled this eb but the page is still hanging out on the mapping;
3262          * carry on.
3263          */
3264         if (!PagePrivate(page))
3265                 goto out;
3266
3267         eb = (struct extent_buffer *)page->private;
3268         if (!eb) {
3269                 WARN_ON(1);
3270                 goto out;
3271         }
3272         if (page != eb->pages[0])
3273                 goto out;
3274
3275         if (!btrfs_try_tree_write_lock(eb)) {
3276                 flush_fn(data);
3277                 btrfs_tree_lock(eb);
3278         }
3279         btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3280
3281         if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3282                 spin_lock(&root->fs_info->delalloc_lock);
3283                 if (root->fs_info->dirty_metadata_bytes >= eb->len)
3284                         root->fs_info->dirty_metadata_bytes -= eb->len;
3285                 else
3286                         WARN_ON(1);
3287                 spin_unlock(&root->fs_info->delalloc_lock);
3288         }
3289
3290         btrfs_tree_unlock(eb);
3291 out:
3292         if (!trylock_page(page)) {
3293                 flush_fn(data);
3294                 lock_page(page);
3295         }
3296         return 0;
3297 }
3298
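/*
 * Minimal sanity checks of the super block at mount time: reject unknown
 * checksum types and, on a read-write mount, warn if the filesystem was
 * previously marked as having errors.
 */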
3299 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
3300                               int read_only)
3301 {
3302         if (btrfs_super_csum_type(fs_info->super_copy) >= ARRAY_SIZE(btrfs_csum_sizes)) {
3303                 printk(KERN_ERR "btrfs: unsupported checksum algorithm\n");
3304                 return -EINVAL;
3305         }
3306
3307         if (read_only)
3308                 return 0;
3309
3310         if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
3311                 printk(KERN_WARNING "warning: mount fs with errors, "
3312                        "running btrfsck is recommended\n");
3313         }
3314
3315         return 0;
3316 }
3317
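/*
 * Error-path counterpart of btrfs_commit_super: instead of committing, the
 * open transactions are torn down via btrfs_cleanup_transaction() and the
 * super blocks are then written out.
 */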
3318 int btrfs_error_commit_super(struct btrfs_root *root)
3319 {
3320         int ret;
3321
3322         mutex_lock(&root->fs_info->cleaner_mutex);
3323         btrfs_run_delayed_iputs(root);
3324         mutex_unlock(&root->fs_info->cleaner_mutex);
3325
3326         down_write(&root->fs_info->cleanup_work_sem);
3327         up_write(&root->fs_info->cleanup_work_sem);
3328
3329         /* cleanup FS via transaction */
3330         btrfs_cleanup_transaction(root);
3331
3332         ret = write_ctree_super(NULL, root, 0);
3333
3334         return ret;
3335 }
3336
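/*
 * The btrfs_destroy_* helpers below are used on the error cleanup path
 * (btrfs_cleanup_transaction and friends) to drop state that a normal
 * commit would have consumed.
 */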
3337 static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
3338 {
3339         struct btrfs_inode *btrfs_inode;
3340         struct list_head splice;
3341
3342         INIT_LIST_HEAD(&splice);
3343
3344         mutex_lock(&root->fs_info->ordered_operations_mutex);
3345         spin_lock(&root->fs_info->ordered_extent_lock);
3346
3347         list_splice_init(&root->fs_info->ordered_operations, &splice);
3348         while (!list_empty(&splice)) {
3349                 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
3350                                          ordered_operations);
3351
3352                 list_del_init(&btrfs_inode->ordered_operations);
3353
3354                 btrfs_invalidate_inodes(btrfs_inode->root);
3355         }
3356
3357         spin_unlock(&root->fs_info->ordered_extent_lock);
3358         mutex_unlock(&root->fs_info->ordered_operations_mutex);
3359 }
3360
3361 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
3362 {
3363         struct list_head splice;
3364         struct btrfs_ordered_extent *ordered;
3365         struct inode *inode;
3366
3367         INIT_LIST_HEAD(&splice);
3368
3369         spin_lock(&root->fs_info->ordered_extent_lock);
3370
3371         list_splice_init(&root->fs_info->ordered_extents, &splice);
3372         while (!list_empty(&splice)) {
3373                 ordered = list_entry(splice.next, struct btrfs_ordered_extent,
3374                                      root_extent_list);
3375
3376                 list_del_init(&ordered->root_extent_list);
3377                 atomic_inc(&ordered->refs);
3378
3379                 /* the inode may be getting freed (in sys_unlink path). */
3380                 inode = igrab(ordered->inode);
3381
3382                 spin_unlock(&root->fs_info->ordered_extent_lock);
3383                 if (inode)
3384                         iput(inode);
3385
3386                 atomic_set(&ordered->refs, 1);
3387                 btrfs_put_ordered_extent(ordered);
3388
3389                 spin_lock(&root->fs_info->ordered_extent_lock);
3390         }
3391
3392         spin_unlock(&root->fs_info->ordered_extent_lock);
3393 }
3394
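/*
 * Drop every delayed ref still queued on @trans.  Head refs that are
 * currently being processed (their mutex is held) are waited on before
 * being released.
 */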
3395 int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3396                                struct btrfs_root *root)
3397 {
3398         struct rb_node *node;
3399         struct btrfs_delayed_ref_root *delayed_refs;
3400         struct btrfs_delayed_ref_node *ref;
3401         int ret = 0;
3402
3403         delayed_refs = &trans->delayed_refs;
3404
3405         spin_lock(&delayed_refs->lock);
3406         if (delayed_refs->num_entries == 0) {
3407                 spin_unlock(&delayed_refs->lock);
3408                 printk(KERN_INFO "delayed_refs has NO entry\n");
3409                 return ret;
3410         }
3411
3412         while ((node = rb_first(&delayed_refs->root)) != NULL) {
3413                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
3414
3415                 atomic_set(&ref->refs, 1);
3416                 if (btrfs_delayed_ref_is_head(ref)) {
3417                         struct btrfs_delayed_ref_head *head;
3418
3419                         head = btrfs_delayed_node_to_head(ref);
3420                         if (!mutex_trylock(&head->mutex)) {
3421                                 atomic_inc(&ref->refs);
3422                                 spin_unlock(&delayed_refs->lock);
3423
3424                                 /* Need to wait for the delayed ref to run */
3425                                 mutex_lock(&head->mutex);
3426                                 mutex_unlock(&head->mutex);
3427                                 btrfs_put_delayed_ref(ref);
3428
3429                                 continue;
3430                         }
3431
3432                         kfree(head->extent_op);
3433                         delayed_refs->num_heads--;
3434                         if (list_empty(&head->cluster))
3435                                 delayed_refs->num_heads_ready--;
3436                         list_del_init(&head->cluster);
3437                 }
3438                 ref->in_tree = 0;
3439                 rb_erase(&ref->rb_node, &delayed_refs->root);
3440                 delayed_refs->num_entries--;
3441
3442                 spin_unlock(&delayed_refs->lock);
3443                 btrfs_put_delayed_ref(ref);
3444
3445                 cond_resched();
3446                 spin_lock(&delayed_refs->lock);
3447         }
3448
3449         spin_unlock(&delayed_refs->lock);
3450
3451         return ret;
3452 }
3453
3454 static void btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
3455 {
3456         struct btrfs_pending_snapshot *snapshot;
3457         struct list_head splice;
3458
3459         INIT_LIST_HEAD(&splice);
3460
3461         list_splice_init(&t->pending_snapshots, &splice);
3462
3463         while (!list_empty(&splice)) {
3464                 snapshot = list_entry(splice.next,
3465                                       struct btrfs_pending_snapshot,
3466                                       list);
3467
3468                 list_del_init(&snapshot->list);
3469
3470                 kfree(snapshot);
3471         }
3472 }
3473
3474 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
3475 {
3476         struct btrfs_inode *btrfs_inode;
3477         struct list_head splice;
3478
3479         INIT_LIST_HEAD(&splice);
3480
3481         spin_lock(&root->fs_info->delalloc_lock);
3482         list_splice_init(&root->fs_info->delalloc_inodes, &splice);
3483
3484         while (!list_empty(&splice)) {
3485                 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
3486                                     delalloc_inodes);
3487
3488                 list_del_init(&btrfs_inode->delalloc_inodes);
3489
3490                 btrfs_invalidate_inodes(btrfs_inode->root);
3491         }
3492
3493         spin_unlock(&root->fs_info->delalloc_lock);
3494 }
3495
3496 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
3497                                         struct extent_io_tree *dirty_pages,
3498                                         int mark)
3499 {
3500         int ret;
3501         struct page *page;
3502         struct inode *btree_inode = root->fs_info->btree_inode;
3503         struct extent_buffer *eb;
3504         u64 start = 0;
3505         u64 end;
3506         u64 offset;
3507         unsigned long index;
3508
3509         while (1) {
3510                 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
3511                                             mark);
3512                 if (ret)
3513                         break;
3514
3515                 clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
3516                 while (start <= end) {
3517                         index = start >> PAGE_CACHE_SHIFT;
3518                         start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
3519                         page = find_get_page(btree_inode->i_mapping, index);
3520                         if (!page)
3521                                 continue;
3522                         offset = page_offset(page);
3523
3524                         spin_lock(&dirty_pages->buffer_lock);
3525                         eb = radix_tree_lookup(
3526                              &(&BTRFS_I(page->mapping->host)->io_tree)->buffer,
3527                                                offset >> PAGE_CACHE_SHIFT);
3528                         spin_unlock(&dirty_pages->buffer_lock);
3529                         if (eb)
3530                                 ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY,
3531                                                          &eb->bflags);
3532                         if (PageWriteback(page))
3533                                 end_page_writeback(page);
3534
3535                         lock_page(page);
3536                         if (PageDirty(page)) {
3537                                 clear_page_dirty_for_io(page);
3538                                 spin_lock_irq(&page->mapping->tree_lock);
3539                                 radix_tree_tag_clear(&page->mapping->page_tree,
3540                                                         page_index(page),
3541                                                         PAGECACHE_TAG_DIRTY);
3542                                 spin_unlock_irq(&page->mapping->tree_lock);
3543                         }
3544
3545                         unlock_page(page);
3546                         page_cache_release(page);
3547                 }
3548         }
3549
3550         return ret;
3551 }
3552
3553 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
3554                                        struct extent_io_tree *pinned_extents)
3555 {
3556         struct extent_io_tree *unpin;
3557         u64 start;
3558         u64 end;
3559         int ret;
3560         bool loop = true;
3561
3562         unpin = pinned_extents;
3563 again:
3564         while (1) {
3565                 ret = find_first_extent_bit(unpin, 0, &start, &end,
3566                                             EXTENT_DIRTY);
3567                 if (ret)
3568                         break;
3569
3570                 /* if mounted with -o discard, discard the freed range */
3571                 if (btrfs_test_opt(root, DISCARD))
3572                         ret = btrfs_error_discard_extent(root, start,
3573                                                          end + 1 - start,
3574                                                          NULL);
3575
3576                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
3577                 btrfs_error_unpin_extent_range(root, start, end);
3578                 cond_resched();
3579         }
3580
3581         if (loop) {
3582                 if (unpin == &root->fs_info->freed_extents[0])
3583                         unpin = &root->fs_info->freed_extents[1];
3584                 else
3585                         unpin = &root->fs_info->freed_extents[0];
3586                 loop = false;
3587                 goto again;
3588         }
3589
3590         return 0;
3591 }
3592
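/*
 * Throw away a single transaction on the error path: drop its delayed refs,
 * wake anyone waiting on the commit, and discard its dirty and pinned
 * extents.
 */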
3593 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
3594                                    struct btrfs_root *root)
3595 {
3596         btrfs_destroy_delayed_refs(cur_trans, root);
3597         btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
3598                                 cur_trans->dirty_pages.dirty_bytes);
3599
3600         /* FIXME: cleanup wait for commit */
3601         cur_trans->in_commit = 1;
3602         cur_trans->blocked = 1;
3603         wake_up(&root->fs_info->transaction_blocked_wait);
3604
3605         cur_trans->blocked = 0;
3606         wake_up(&root->fs_info->transaction_wait);
3607
3608         cur_trans->commit_done = 1;
3609         wake_up(&cur_trans->commit_wait);
3610
3611         btrfs_destroy_delayed_inodes(root);
3612         btrfs_assert_delayed_root_empty(root);
3613
3614         btrfs_destroy_pending_snapshots(cur_trans);
3615
3616         btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
3617                                      EXTENT_DIRTY);
3618         btrfs_destroy_pinned_extent(root,
3619                                     root->fs_info->pinned_extents);
3620
3621         /*
3622         memset(cur_trans, 0, sizeof(*cur_trans));
3623         kmem_cache_free(btrfs_transaction_cachep, cur_trans);
3624         */
3625 }
3626
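/*
 * Abort every transaction still on fs_info->trans_list; btrfs_error_commit_super()
 * relies on this to clean up after an error before the supers are written.
 */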
3627 int btrfs_cleanup_transaction(struct btrfs_root *root)
3628 {
3629         struct btrfs_transaction *t;
3630         LIST_HEAD(list);
3631
3632         mutex_lock(&root->fs_info->transaction_kthread_mutex);
3633
3634         spin_lock(&root->fs_info->trans_lock);
3635         list_splice_init(&root->fs_info->trans_list, &list);
3636         root->fs_info->trans_no_join = 1;
3637         spin_unlock(&root->fs_info->trans_lock);
3638
3639         while (!list_empty(&list)) {
3640                 t = list_entry(list.next, struct btrfs_transaction, list);
3641                 if (!t)
3642                         break;
3643
3644                 btrfs_destroy_ordered_operations(root);
3645
3646                 btrfs_destroy_ordered_extents(root);
3647
3648                 btrfs_destroy_delayed_refs(t, root);
3649
3650                 btrfs_block_rsv_release(root,
3651                                         &root->fs_info->trans_block_rsv,
3652                                         t->dirty_pages.dirty_bytes);
3653
3654                 /* FIXME: cleanup wait for commit */
3655                 t->in_commit = 1;
3656                 t->blocked = 1;
3657                 if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
3658                         wake_up(&root->fs_info->transaction_blocked_wait);
3659
3660                 t->blocked = 0;
3661                 if (waitqueue_active(&root->fs_info->transaction_wait))
3662                         wake_up(&root->fs_info->transaction_wait);
3663
3664                 t->commit_done = 1;
3665                 if (waitqueue_active(&t->commit_wait))
3666                         wake_up(&t->commit_wait);
3667
3668                 btrfs_destroy_delayed_inodes(root);
3669                 btrfs_assert_delayed_root_empty(root);
3670
3671                 btrfs_destroy_pending_snapshots(t);
3672
3673                 btrfs_destroy_delalloc_inodes(root);
3674
3675                 spin_lock(&root->fs_info->trans_lock);
3676                 root->fs_info->running_transaction = NULL;
3677                 spin_unlock(&root->fs_info->trans_lock);
3678
3679                 btrfs_destroy_marked_extents(root, &t->dirty_pages,
3680                                              EXTENT_DIRTY);
3681
3682                 btrfs_destroy_pinned_extent(root,
3683                                             root->fs_info->pinned_extents);
3684
3685                 atomic_set(&t->use_count, 0);
3686                 list_del_init(&t->list);
3687                 memset(t, 0, sizeof(*t));
3688                 kmem_cache_free(btrfs_transaction_cachep, t);
3689         }
3690
3691         spin_lock(&root->fs_info->trans_lock);
3692         root->fs_info->trans_no_join = 0;
3693         spin_unlock(&root->fs_info->trans_lock);
3694         mutex_unlock(&root->fs_info->transaction_kthread_mutex);
3695
3696         return 0;
3697 }
3698
3699 static struct extent_io_ops btree_extent_io_ops = {
3700         .write_cache_pages_lock_hook = btree_lock_page_hook,
3701         .readpage_end_io_hook = btree_readpage_end_io_hook,
3702         .readpage_io_failed_hook = btree_io_failed_hook,
3703         .submit_bio_hook = btree_submit_bio_hook,
3704         /* note we're sharing with inode.c for the merge bio hook */
3705         .merge_bio_hook = btrfs_merge_bio_hook,
3706 };