1 #include <linux/bitops.h>
2 #include <linux/slab.h>
3 #include <linux/bio.h>
4 #include <linux/mm.h>
5 #include <linux/pagemap.h>
6 #include <linux/page-flags.h>
7 #include <linux/module.h>
8 #include <linux/spinlock.h>
9 #include <linux/blkdev.h>
10 #include <linux/swap.h>
11 #include <linux/writeback.h>
12 #include <linux/pagevec.h>
13 #include "extent_io.h"
14 #include "extent_map.h"
15 #include "compat.h"
16 #include "ctree.h"
17 #include "btrfs_inode.h"
19 static struct kmem_cache *extent_state_cache;
20 static struct kmem_cache *extent_buffer_cache;
22 static LIST_HEAD(buffers);
23 static LIST_HEAD(states);
27 static DEFINE_SPINLOCK(leak_lock);
30 #define BUFFER_LRU_MAX 64
35 struct rb_node rb_node;
38 struct extent_page_data {
39 struct bio *bio;
40 struct extent_io_tree *tree;
41 get_extent_t *get_extent;
43 /* tells writepage not to lock the state bits for this range
44 * it still does the unlocking
45 */
46 unsigned int extent_locked:1;
48 /* tells the submit_bio code to use a WRITE_SYNC */
49 unsigned int sync_io:1;
50 };
52 int __init extent_io_init(void)
53 {
54 extent_state_cache = kmem_cache_create("extent_state",
55 sizeof(struct extent_state), 0,
56 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
57 if (!extent_state_cache)
58 return -ENOMEM;
60 extent_buffer_cache = kmem_cache_create("extent_buffers",
61 sizeof(struct extent_buffer), 0,
62 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
63 if (!extent_buffer_cache)
64 goto free_state_cache;
65 return 0;
67 free_state_cache:
68 kmem_cache_destroy(extent_state_cache);
69 return -ENOMEM;
70 }
72 void extent_io_exit(void)
74 struct extent_state *state;
75 struct extent_buffer *eb;
77 while (!list_empty(&states)) {
78 state = list_entry(states.next, struct extent_state, leak_list);
79 printk(KERN_ERR "btrfs state leak: start %llu end %llu "
80 "state %lu in tree %p refs %d\n",
81 (unsigned long long)state->start,
82 (unsigned long long)state->end,
83 state->state, state->tree, atomic_read(&state->refs));
84 list_del(&state->leak_list);
85 kmem_cache_free(extent_state_cache, state);
89 while (!list_empty(&buffers)) {
90 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
91 printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
92 "refs %d\n", (unsigned long long)eb->start,
93 eb->len, atomic_read(&eb->refs));
94 list_del(&eb->leak_list);
95 kmem_cache_free(extent_buffer_cache, eb);
97 if (extent_state_cache)
98 kmem_cache_destroy(extent_state_cache);
99 if (extent_buffer_cache)
100 kmem_cache_destroy(extent_buffer_cache);
103 void extent_io_tree_init(struct extent_io_tree *tree,
104 struct address_space *mapping)
106 tree->state = RB_ROOT;
107 INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
109 tree->dirty_bytes = 0;
110 spin_lock_init(&tree->lock);
111 spin_lock_init(&tree->buffer_lock);
112 tree->mapping = mapping;
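/*
 * Editorial sketch, not part of the original file: a per-inode tree is
 * normally initialized once, when the in-memory inode is set up, and it
 * points at that inode's mapping (btrfs does this for
 * BTRFS_I(inode)->io_tree).  The helper name below is hypothetical.
 */
static inline void example_setup_io_tree(struct extent_io_tree *tree,
                                         struct inode *inode)
{
        extent_io_tree_init(tree, inode->i_mapping);
}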
115 static struct extent_state *alloc_extent_state(gfp_t mask)
117 struct extent_state *state;
122 state = kmem_cache_alloc(extent_state_cache, mask);
129 spin_lock_irqsave(&leak_lock, flags);
130 list_add(&state->leak_list, &states);
131 spin_unlock_irqrestore(&leak_lock, flags);
133 atomic_set(&state->refs, 1);
134 init_waitqueue_head(&state->wq);
138 void free_extent_state(struct extent_state *state)
142 if (atomic_dec_and_test(&state->refs)) {
146 WARN_ON(state->tree);
148 spin_lock_irqsave(&leak_lock, flags);
149 list_del(&state->leak_list);
150 spin_unlock_irqrestore(&leak_lock, flags);
152 kmem_cache_free(extent_state_cache, state);
156 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
157 struct rb_node *node)
159 struct rb_node **p = &root->rb_node;
160 struct rb_node *parent = NULL;
161 struct tree_entry *entry;
165 entry = rb_entry(parent, struct tree_entry, rb_node);
167 if (offset < entry->start)
169 else if (offset > entry->end)
175 entry = rb_entry(node, struct tree_entry, rb_node);
176 rb_link_node(node, parent, p);
177 rb_insert_color(node, root);
181 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
182 struct rb_node **prev_ret,
183 struct rb_node **next_ret)
185 struct rb_root *root = &tree->state;
186 struct rb_node *n = root->rb_node;
187 struct rb_node *prev = NULL;
188 struct rb_node *orig_prev = NULL;
189 struct tree_entry *entry;
190 struct tree_entry *prev_entry = NULL;
193 entry = rb_entry(n, struct tree_entry, rb_node);
197 if (offset < entry->start)
199 else if (offset > entry->end)
207 while (prev && offset > prev_entry->end) {
208 prev = rb_next(prev);
209 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
216 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
217 while (prev && offset < prev_entry->start) {
218 prev = rb_prev(prev);
219 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
226 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
229 struct rb_node *prev = NULL;
232 ret = __etree_search(tree, offset, &prev, NULL);
238 static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
239 struct extent_state *other)
241 if (tree->ops && tree->ops->merge_extent_hook)
242 tree->ops->merge_extent_hook(tree->mapping->host, new,
247 * utility function to look for merge candidates inside a given range.
248 * Any extents with matching state are merged together into a single
249 * extent in the tree. Extents with EXTENT_IOBITS or EXTENT_BOUNDARY in their state field
250 * are not merged because the end_io handlers need to be able to do
251 * operations on them without sleeping (or doing allocations/splits).
253 * This should be called with the tree lock held.
255 static int merge_state(struct extent_io_tree *tree,
256 struct extent_state *state)
258 struct extent_state *other;
259 struct rb_node *other_node;
261 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
264 other_node = rb_prev(&state->rb_node);
266 other = rb_entry(other_node, struct extent_state, rb_node);
267 if (other->end == state->start - 1 &&
268 other->state == state->state) {
269 merge_cb(tree, state, other);
270 state->start = other->start;
272 rb_erase(&other->rb_node, &tree->state);
273 free_extent_state(other);
276 other_node = rb_next(&state->rb_node);
278 other = rb_entry(other_node, struct extent_state, rb_node);
279 if (other->start == state->end + 1 &&
280 other->state == state->state) {
281 merge_cb(tree, state, other);
282 other->start = state->start;
284 rb_erase(&state->rb_node, &tree->state);
285 free_extent_state(state);
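/*
 * Editorial sketch, not part of the original file: because merge_state()
 * coalesces neighbours with identical state, tagging two touching byte
 * ranges with the same bits should leave a single [0, 8191] record in
 * the tree.  The tree pointer, ranges and bit choice here are
 * hypothetical; EXTENT_NEW is used because it is not one of the
 * EXTENT_IOBITS/EXTENT_BOUNDARY bits that block merging.
 */
static inline void example_adjacent_ranges_merge(struct extent_io_tree *tree)
{
        set_extent_bits(tree, 0, 4095, EXTENT_NEW, GFP_NOFS);
        set_extent_bits(tree, 4096, 8191, EXTENT_NEW, GFP_NOFS);
}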
293 static int set_state_cb(struct extent_io_tree *tree,
294 struct extent_state *state, int *bits)
296 if (tree->ops && tree->ops->set_bit_hook) {
297 return tree->ops->set_bit_hook(tree->mapping->host,
304 static void clear_state_cb(struct extent_io_tree *tree,
305 struct extent_state *state, int *bits)
307 if (tree->ops && tree->ops->clear_bit_hook)
308 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
312 * insert an extent_state struct into the tree. 'bits' are set on the
313 * struct before it is inserted.
315 * This may return -EEXIST if the extent is already there, in which case the
316 * state struct is freed.
318 * The tree lock is not taken internally. This is a utility function and
319 * probably isn't what you want to call (see set/clear_extent_bit).
321 static int insert_state(struct extent_io_tree *tree,
322 struct extent_state *state, u64 start, u64 end,
325 struct rb_node *node;
326 int bits_to_set = *bits & ~EXTENT_CTLBITS;
330 printk(KERN_ERR "btrfs end < start %llu %llu\n",
331 (unsigned long long)end,
332 (unsigned long long)start);
335 state->start = start;
337 ret = set_state_cb(tree, state, bits);
341 if (bits_to_set & EXTENT_DIRTY)
342 tree->dirty_bytes += end - start + 1;
343 state->state |= bits_to_set;
344 node = tree_insert(&tree->state, end, &state->rb_node);
345 if (node) {
346 struct extent_state *found;
347 found = rb_entry(node, struct extent_state, rb_node);
348 printk(KERN_ERR "btrfs found node %llu %llu on insert of "
349 "%llu %llu\n", (unsigned long long)found->start,
350 (unsigned long long)found->end,
351 (unsigned long long)start, (unsigned long long)end);
352 free_extent_state(state);
356 merge_state(tree, state);
360 static int split_cb(struct extent_io_tree *tree, struct extent_state *orig,
363 if (tree->ops && tree->ops->split_extent_hook)
364 return tree->ops->split_extent_hook(tree->mapping->host,
370 * split a given extent state struct in two, inserting the preallocated
371 * struct 'prealloc' as the newly created second half. 'split' indicates an
372 * offset inside 'orig' where it should be split.
375 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
376 * are two extent state structs in the tree:
377 * prealloc: [orig->start, split - 1]
378 * orig: [ split, orig->end ]
380 * The tree locks are not taken by this function. They need to be held by the caller.
383 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
384 struct extent_state *prealloc, u64 split)
386 struct rb_node *node;
388 split_cb(tree, orig, split);
390 prealloc->start = orig->start;
391 prealloc->end = split - 1;
392 prealloc->state = orig->state;
395 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
397 free_extent_state(prealloc);
400 prealloc->tree = tree;
405 * utility function to clear some bits in an extent state struct.
406 * it will optionally wake up anyone waiting on this state (wake == 1), or
407 * forcibly remove the state from the tree (delete == 1).
409 * If no bits are set on the state struct after clearing things, the
410 * struct is freed and removed from the tree
412 static int clear_state_bit(struct extent_io_tree *tree,
413 struct extent_state *state,
416 int bits_to_clear = *bits & ~EXTENT_CTLBITS;
417 int ret = state->state & bits_to_clear;
419 if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
420 u64 range = state->end - state->start + 1;
421 WARN_ON(range > tree->dirty_bytes);
422 tree->dirty_bytes -= range;
424 clear_state_cb(tree, state, bits);
425 state->state &= ~bits_to_clear;
428 if (state->state == 0) {
430 rb_erase(&state->rb_node, &tree->state);
432 free_extent_state(state);
437 merge_state(tree, state);
442 static struct extent_state *
443 alloc_extent_state_atomic(struct extent_state *prealloc)
446 prealloc = alloc_extent_state(GFP_ATOMIC);
452 * clear some bits on a range in the tree. This may require splitting
453 * or inserting elements in the tree, so the gfp mask is used to
454 * indicate which allocations or sleeping are allowed.
456 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
457 * the given range from the tree regardless of state (ie for truncate).
459 * the range [start, end] is inclusive.
461 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
462 * bits were already set, or zero if none of the bits were already set.
464 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
465 int bits, int wake, int delete,
466 struct extent_state **cached_state,
469 struct extent_state *state;
470 struct extent_state *cached;
471 struct extent_state *prealloc = NULL;
472 struct rb_node *next_node;
473 struct rb_node *node;
479 if (delete)
480 bits |= ~EXTENT_CTLBITS;
481 bits |= EXTENT_FIRST_DELALLOC;
483 if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
486 if (!prealloc && (mask & __GFP_WAIT)) {
487 prealloc = alloc_extent_state(mask);
492 spin_lock(&tree->lock);
494 cached = *cached_state;
497 *cached_state = NULL;
501 if (cached && cached->tree && cached->start == start) {
503 atomic_dec(&cached->refs);
508 free_extent_state(cached);
511 * this search will find the extents that end after
514 node = tree_search(tree, start);
517 state = rb_entry(node, struct extent_state, rb_node);
519 if (state->start > end)
521 WARN_ON(state->end < start);
522 last_end = state->end;
525 * | ---- desired range ---- |
527 * | ------------- state -------------- |
529 * We need to split the extent we found, and may flip
530 * bits on second half.
532 * If the extent we found extends past our range, we
533 * just split and search again. It'll get split again
534 * the next time though.
536 * If the extent we found is inside our range, we clear
537 * the desired bit on it.
540 if (state->start < start) {
541 prealloc = alloc_extent_state_atomic(prealloc);
543 err = split_state(tree, state, prealloc, start);
544 BUG_ON(err == -EEXIST);
548 if (state->end <= end) {
549 set |= clear_state_bit(tree, state, &bits, wake);
550 if (last_end == (u64)-1)
552 start = last_end + 1;
557 * | ---- desired range ---- |
559 * We need to split the extent, and clear the bit
562 if (state->start <= end && state->end > end) {
563 prealloc = alloc_extent_state_atomic(prealloc);
565 err = split_state(tree, state, prealloc, end + 1);
566 BUG_ON(err == -EEXIST);
570 set |= clear_state_bit(tree, prealloc, &bits, wake);
576 if (state->end < end && prealloc && !need_resched())
577 next_node = rb_next(&state->rb_node);
581 set |= clear_state_bit(tree, state, &bits, wake);
582 if (last_end == (u64)-1)
584 start = last_end + 1;
585 if (start <= end && next_node) {
586 state = rb_entry(next_node, struct extent_state,
588 if (state->start == start)
594 spin_unlock(&tree->lock);
596 free_extent_state(prealloc);
603 spin_unlock(&tree->lock);
604 if (mask & __GFP_WAIT)
605 cond_resched();
606 goto again;
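/*
 * Editorial sketch, not part of the original file: the "delete" style of
 * call described above drops every state record covering a range no
 * matter which bits are set, waking any waiters, which is what a
 * truncate-like path wants.  The helper and the bit mask passed in are
 * hypothetical (with delete == 1 the mask is largely irrelevant).
 */
static inline int example_clear_range_for_truncate(struct extent_io_tree *tree,
                                                   u64 start, u64 end)
{
        return clear_extent_bit(tree, start, end,
                                EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
                                1, 1, NULL, GFP_NOFS);
}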
609 static int wait_on_state(struct extent_io_tree *tree,
610 struct extent_state *state)
611 __releases(tree->lock)
612 __acquires(tree->lock)
615 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
616 spin_unlock(&tree->lock);
618 spin_lock(&tree->lock);
619 finish_wait(&state->wq, &wait);
624 * waits for one or more bits to clear on a range in the state tree.
625 * The range [start, end] is inclusive.
626 * The tree lock is taken by this function
628 int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
630 struct extent_state *state;
631 struct rb_node *node;
633 spin_lock(&tree->lock);
637 * this search will find all the extents that end after
640 node = tree_search(tree, start);
644 state = rb_entry(node, struct extent_state, rb_node);
646 if (state->start > end)
649 if (state->state & bits) {
650 start = state->start;
651 atomic_inc(&state->refs);
652 wait_on_state(tree, state);
653 free_extent_state(state);
656 start = state->end + 1;
661 if (need_resched()) {
662 spin_unlock(&tree->lock);
664 spin_lock(&tree->lock);
668 spin_unlock(&tree->lock);
672 static int set_state_bits(struct extent_io_tree *tree,
673 struct extent_state *state,
677 int bits_to_set = *bits & ~EXTENT_CTLBITS;
679 ret = set_state_cb(tree, state, bits);
682 if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
683 u64 range = state->end - state->start + 1;
684 tree->dirty_bytes += range;
686 state->state |= bits_to_set;
691 static void cache_state(struct extent_state *state,
692 struct extent_state **cached_ptr)
694 if (cached_ptr && !(*cached_ptr)) {
695 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
697 atomic_inc(&state->refs);
702 static void uncache_state(struct extent_state **cached_ptr)
704 if (cached_ptr && (*cached_ptr)) {
705 struct extent_state *state = *cached_ptr;
707 free_extent_state(state);
712 * set some bits on a range in the tree. This may require allocations or
713 * sleeping, so the gfp mask is used to indicate what is allowed.
715 * If any of the exclusive bits are set, this will fail with -EEXIST if some
716 * part of the range already has the desired bits set. The start of the
717 * existing range is returned in failed_start in this case.
719 * [start, end] is inclusive. This takes the tree lock.
722 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
723 int bits, int exclusive_bits, u64 *failed_start,
724 struct extent_state **cached_state, gfp_t mask)
726 struct extent_state *state;
727 struct extent_state *prealloc = NULL;
728 struct rb_node *node;
733 bits |= EXTENT_FIRST_DELALLOC;
735 if (!prealloc && (mask & __GFP_WAIT)) {
736 prealloc = alloc_extent_state(mask);
740 spin_lock(&tree->lock);
741 if (cached_state && *cached_state) {
742 state = *cached_state;
743 if (state->start == start && state->tree) {
744 node = &state->rb_node;
749 * this search will find all the extents that end after
752 node = tree_search(tree, start);
754 prealloc = alloc_extent_state_atomic(prealloc);
756 err = insert_state(tree, prealloc, start, end, &bits);
758 BUG_ON(err == -EEXIST);
761 state = rb_entry(node, struct extent_state, rb_node);
763 last_start = state->start;
764 last_end = state->end;
767 * | ---- desired range ---- |
770 * Just lock what we found and keep going
772 if (state->start == start && state->end <= end) {
773 struct rb_node *next_node;
774 if (state->state & exclusive_bits) {
775 *failed_start = state->start;
780 err = set_state_bits(tree, state, &bits);
784 next_node = rb_next(node);
785 cache_state(state, cached_state);
786 merge_state(tree, state);
787 if (last_end == (u64)-1)
790 start = last_end + 1;
791 if (next_node && start < end && prealloc && !need_resched()) {
792 state = rb_entry(next_node, struct extent_state,
794 if (state->start == start)
801 * | ---- desired range ---- |
804 * | ------------- state -------------- |
806 * We need to split the extent we found, and may flip bits on
809 * If the extent we found extends past our
810 * range, we just split and search again. It'll get split
811 * again the next time though.
813 * If the extent we found is inside our range, we set the
816 if (state->start < start) {
817 if (state->state & exclusive_bits) {
818 *failed_start = start;
823 prealloc = alloc_extent_state_atomic(prealloc);
825 err = split_state(tree, state, prealloc, start);
826 BUG_ON(err == -EEXIST);
830 if (state->end <= end) {
831 err = set_state_bits(tree, state, &bits);
834 cache_state(state, cached_state);
835 merge_state(tree, state);
836 if (last_end == (u64)-1)
838 start = last_end + 1;
843 * | ---- desired range ---- |
844 * | state | or | state |
846 * There's a hole, we need to insert something in it and
847 * ignore the extent we found.
849 if (state->start > start) {
851 if (end < last_start)
852 this_end = end;
853 else
854 this_end = last_start - 1;
856 prealloc = alloc_extent_state_atomic(prealloc);
860 * Avoid freeing 'prealloc' if it can be merged with the later extent.
863 atomic_inc(&prealloc->refs);
864 err = insert_state(tree, prealloc, start, this_end,
866 BUG_ON(err == -EEXIST);
868 free_extent_state(prealloc);
872 cache_state(prealloc, cached_state);
873 free_extent_state(prealloc);
875 start = this_end + 1;
879 * | ---- desired range ---- |
881 * We need to split the extent, and set the bit
884 if (state->start <= end && state->end > end) {
885 if (state->state & exclusive_bits) {
886 *failed_start = start;
891 prealloc = alloc_extent_state_atomic(prealloc);
893 err = split_state(tree, state, prealloc, end + 1);
894 BUG_ON(err == -EEXIST);
896 err = set_state_bits(tree, prealloc, &bits);
901 cache_state(prealloc, cached_state);
902 merge_state(tree, prealloc);
910 spin_unlock(&tree->lock);
912 free_extent_state(prealloc);
919 spin_unlock(&tree->lock);
920 if (mask & __GFP_WAIT)
925 /* wrappers around set/clear extent bit */
926 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
929 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
933 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
934 int bits, gfp_t mask)
936 return set_extent_bit(tree, start, end, bits, 0, NULL,
940 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
941 int bits, gfp_t mask)
943 return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
946 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
947 struct extent_state **cached_state, gfp_t mask)
949 return set_extent_bit(tree, start, end,
950 EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
951 0, NULL, cached_state, mask);
954 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
957 return clear_extent_bit(tree, start, end,
958 EXTENT_DIRTY | EXTENT_DELALLOC |
959 EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
962 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
965 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
969 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
970 struct extent_state **cached_state, gfp_t mask)
972 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
973 NULL, cached_state, mask);
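/*
 * Editorial sketch, not part of the original file: the wrappers above are
 * thin conveniences over set_extent_bit()/clear_extent_bit().  A write
 * path might tag a byte range as delalloc and a cleanup path might strip
 * the dirty/delalloc/accounting bits again; the helper name and the range
 * are hypothetical.
 */
static inline void example_wrapper_usage(struct extent_io_tree *tree,
                                         u64 start, u64 end)
{
        set_extent_delalloc(tree, start, end, NULL, GFP_NOFS);
        /* ... later, once the range has been dealt with ... */
        clear_extent_dirty(tree, start, end, GFP_NOFS);
}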
976 static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
977 u64 end, struct extent_state **cached_state,
980 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
985 * either insert or lock state struct between start and end. Use mask to tell
986 * us if waiting is desired.
988 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
989 int bits, struct extent_state **cached_state, gfp_t mask)
994 err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
995 EXTENT_LOCKED, &failed_start,
997 if (err == -EEXIST && (mask & __GFP_WAIT)) {
998 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
999 start = failed_start;
1003 WARN_ON(start > end);
1008 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
1010 return lock_extent_bits(tree, start, end, 0, NULL, mask);
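/*
 * Editorial sketch, not part of the original file: the usual pattern built
 * on lock_extent()/unlock_extent() is to lock a byte range, do work that
 * must not race with IO completion on that range, and then unlock.  This
 * mirrors the readpage path later in this file; the helper itself is
 * hypothetical.
 */
static inline void example_locked_range(struct extent_io_tree *tree,
                                        u64 start, u64 end)
{
        lock_extent(tree, start, end, GFP_NOFS);
        /* ... inspect or update the range while it cannot change ... */
        unlock_extent(tree, start, end, GFP_NOFS);
}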
1013 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
1019 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1020 &failed_start, NULL, mask);
1021 if (err == -EEXIST) {
1022 if (failed_start > start)
1023 clear_extent_bit(tree, start, failed_start - 1,
1024 EXTENT_LOCKED, 1, 0, NULL, mask);
1030 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1031 struct extent_state **cached, gfp_t mask)
1033 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
1037 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
1039 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
1044 * helper function to set both the pages and the extents in the tree to writeback
1046 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1048 unsigned long index = start >> PAGE_CACHE_SHIFT;
1049 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1052 while (index <= end_index) {
1053 page = find_get_page(tree->mapping, index);
1055 set_page_writeback(page);
1056 page_cache_release(page);
1063 * find the first offset in the io tree with 'bits' set. zero is
1064 * returned if we find something, and *start_ret and *end_ret are
1065 * set to reflect the state struct that was found.
1067 * If nothing was found, 1 is returned, < 0 on error
1069 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1070 u64 *start_ret, u64 *end_ret, int bits)
1072 struct rb_node *node;
1073 struct extent_state *state;
1076 spin_lock(&tree->lock);
1078 * this search will find all the extents that end after
1081 node = tree_search(tree, start);
1086 state = rb_entry(node, struct extent_state, rb_node);
1087 if (state->end >= start && (state->state & bits)) {
1088 *start_ret = state->start;
1089 *end_ret = state->end;
1093 node = rb_next(node);
1098 spin_unlock(&tree->lock);
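/*
 * Editorial sketch, not part of the original file: walking the tree for
 * dirty ranges with find_first_extent_bit().  The return convention is
 * 0 when a range was found, so the loop advances the cursor past each
 * hit; the helper is hypothetical.
 */
static inline void example_walk_dirty_ranges(struct extent_io_tree *tree)
{
        u64 cur = 0, found_start, found_end;

        while (!find_first_extent_bit(tree, cur, &found_start,
                                      &found_end, EXTENT_DIRTY)) {
                /* [found_start, found_end] has EXTENT_DIRTY set */
                if (found_end == (u64)-1)
                        break;
                cur = found_end + 1;
        }
}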
1102 /* find the first state struct with 'bits' set after 'start', and
1103 * return it. tree->lock must be held. NULL will be returned if
1104 * nothing was found after 'start'
1106 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1107 u64 start, int bits)
1109 struct rb_node *node;
1110 struct extent_state *state;
1113 * this search will find all the extents that end after
1116 node = tree_search(tree, start);
1121 state = rb_entry(node, struct extent_state, rb_node);
1122 if (state->end >= start && (state->state & bits))
1125 node = rb_next(node);
1134 * find a contiguous range of bytes in the file marked as delalloc, not
1135 * more than 'max_bytes'. start and end are used to return the range,
1137 * 1 is returned if we find something, 0 if nothing was in the tree
1139 static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1140 u64 *start, u64 *end, u64 max_bytes,
1141 struct extent_state **cached_state)
1143 struct rb_node *node;
1144 struct extent_state *state;
1145 u64 cur_start = *start;
1147 u64 total_bytes = 0;
1149 spin_lock(&tree->lock);
1152 * this search will find all the extents that end after
1155 node = tree_search(tree, cur_start);
1163 state = rb_entry(node, struct extent_state, rb_node);
1164 if (found && (state->start != cur_start ||
1165 (state->state & EXTENT_BOUNDARY))) {
1168 if (!(state->state & EXTENT_DELALLOC)) {
1174 *start = state->start;
1175 *cached_state = state;
1176 atomic_inc(&state->refs);
1180 cur_start = state->end + 1;
1181 node = rb_next(node);
1184 total_bytes += state->end - state->start + 1;
1185 if (total_bytes >= max_bytes)
1189 spin_unlock(&tree->lock);
1193 static noinline int __unlock_for_delalloc(struct inode *inode,
1194 struct page *locked_page,
1198 struct page *pages[16];
1199 unsigned long index = start >> PAGE_CACHE_SHIFT;
1200 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1201 unsigned long nr_pages = end_index - index + 1;
1204 if (index == locked_page->index && end_index == index)
1207 while (nr_pages > 0) {
1208 ret = find_get_pages_contig(inode->i_mapping, index,
1209 min_t(unsigned long, nr_pages,
1210 ARRAY_SIZE(pages)), pages);
1211 for (i = 0; i < ret; i++) {
1212 if (pages[i] != locked_page)
1213 unlock_page(pages[i]);
1214 page_cache_release(pages[i]);
1223 static noinline int lock_delalloc_pages(struct inode *inode,
1224 struct page *locked_page,
1228 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1229 unsigned long start_index = index;
1230 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1231 unsigned long pages_locked = 0;
1232 struct page *pages[16];
1233 unsigned long nrpages;
1237 /* the caller is responsible for locking the start index */
1238 if (index == locked_page->index && index == end_index)
1241 /* skip the page at the start index */
1242 nrpages = end_index - index + 1;
1243 while (nrpages > 0) {
1244 ret = find_get_pages_contig(inode->i_mapping, index,
1245 min_t(unsigned long,
1246 nrpages, ARRAY_SIZE(pages)), pages);
1251 /* now we have an array of pages, lock them all */
1252 for (i = 0; i < ret; i++) {
1254 * the caller is taking responsibility for
1257 if (pages[i] != locked_page) {
1258 lock_page(pages[i]);
1259 if (!PageDirty(pages[i]) ||
1260 pages[i]->mapping != inode->i_mapping) {
1262 unlock_page(pages[i]);
1263 page_cache_release(pages[i]);
1267 page_cache_release(pages[i]);
1276 if (ret && pages_locked) {
1277 __unlock_for_delalloc(inode, locked_page,
1279 ((u64)(start_index + pages_locked - 1)) <<
1286 * find a contiguous range of bytes in the file marked as delalloc, not
1287 * more than 'max_bytes'. start and end are used to return the range,
1289 * 1 is returned if we find something, 0 if nothing was in the tree
1291 static noinline u64 find_lock_delalloc_range(struct inode *inode,
1292 struct extent_io_tree *tree,
1293 struct page *locked_page,
1294 u64 *start, u64 *end,
1300 struct extent_state *cached_state = NULL;
1305 /* step one, find a bunch of delalloc bytes starting at start */
1306 delalloc_start = *start;
1308 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1309 max_bytes, &cached_state);
1310 if (!found || delalloc_end <= *start) {
1311 *start = delalloc_start;
1312 *end = delalloc_end;
1313 free_extent_state(cached_state);
1318 * start comes from the offset of locked_page. We have to lock
1319 * pages in order, so we can't process delalloc bytes before locked_page
1322 if (delalloc_start < *start)
1323 delalloc_start = *start;
1326 * make sure to limit the number of pages we try to lock down
1329 if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
1330 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
1332 /* step two, lock all the pages after the page that has start */
1333 ret = lock_delalloc_pages(inode, locked_page,
1334 delalloc_start, delalloc_end);
1335 if (ret == -EAGAIN) {
1336 /* some of the pages are gone, let's avoid looping by
1337 * shortening the size of the delalloc range we're searching
1339 free_extent_state(cached_state);
1341 unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1342 max_bytes = PAGE_CACHE_SIZE - offset;
1352 /* step three, lock the state bits for the whole range */
1353 lock_extent_bits(tree, delalloc_start, delalloc_end,
1354 0, &cached_state, GFP_NOFS);
1356 /* then test to make sure it is all still delalloc */
1357 ret = test_range_bit(tree, delalloc_start, delalloc_end,
1358 EXTENT_DELALLOC, 1, cached_state);
1360 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1361 &cached_state, GFP_NOFS);
1362 __unlock_for_delalloc(inode, locked_page,
1363 delalloc_start, delalloc_end);
1367 free_extent_state(cached_state);
1368 *start = delalloc_start;
1369 *end = delalloc_end;
1374 int extent_clear_unlock_delalloc(struct inode *inode,
1375 struct extent_io_tree *tree,
1376 u64 start, u64 end, struct page *locked_page,
1380 struct page *pages[16];
1381 unsigned long index = start >> PAGE_CACHE_SHIFT;
1382 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1383 unsigned long nr_pages = end_index - index + 1;
1387 if (op & EXTENT_CLEAR_UNLOCK)
1388 clear_bits |= EXTENT_LOCKED;
1389 if (op & EXTENT_CLEAR_DIRTY)
1390 clear_bits |= EXTENT_DIRTY;
1392 if (op & EXTENT_CLEAR_DELALLOC)
1393 clear_bits |= EXTENT_DELALLOC;
1395 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1396 if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
1397 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
1398 EXTENT_SET_PRIVATE2)))
1401 while (nr_pages > 0) {
1402 ret = find_get_pages_contig(inode->i_mapping, index,
1403 min_t(unsigned long,
1404 nr_pages, ARRAY_SIZE(pages)), pages);
1405 for (i = 0; i < ret; i++) {
1407 if (op & EXTENT_SET_PRIVATE2)
1408 SetPagePrivate2(pages[i]);
1410 if (pages[i] == locked_page) {
1411 page_cache_release(pages[i]);
1414 if (op & EXTENT_CLEAR_DIRTY)
1415 clear_page_dirty_for_io(pages[i]);
1416 if (op & EXTENT_SET_WRITEBACK)
1417 set_page_writeback(pages[i]);
1418 if (op & EXTENT_END_WRITEBACK)
1419 end_page_writeback(pages[i]);
1420 if (op & EXTENT_CLEAR_UNLOCK_PAGE)
1421 unlock_page(pages[i]);
1422 page_cache_release(pages[i]);
1432 * count the number of bytes in the tree that have a given bit(s)
1433 * set. This can be fairly slow, except for EXTENT_DIRTY which is
1434 * cached. The total number found is returned.
1436 u64 count_range_bits(struct extent_io_tree *tree,
1437 u64 *start, u64 search_end, u64 max_bytes,
1438 unsigned long bits, int contig)
1440 struct rb_node *node;
1441 struct extent_state *state;
1442 u64 cur_start = *start;
1443 u64 total_bytes = 0;
1447 if (search_end <= cur_start) {
1452 spin_lock(&tree->lock);
1453 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1454 total_bytes = tree->dirty_bytes;
1458 * this search will find all the extents that end after
1461 node = tree_search(tree, cur_start);
1466 state = rb_entry(node, struct extent_state, rb_node);
1467 if (state->start > search_end)
1469 if (contig && found && state->start > last + 1)
1471 if (state->end >= cur_start && (state->state & bits) == bits) {
1472 total_bytes += min(search_end, state->end) + 1 -
1473 max(cur_start, state->start);
1474 if (total_bytes >= max_bytes)
1477 *start = state->start;
1481 } else if (contig && found) {
1484 node = rb_next(node);
1489 spin_unlock(&tree->lock);
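/*
 * Editorial sketch, not part of the original file: asking how many
 * delalloc bytes live in the first megabyte of an io tree.  *start is
 * updated to the first matching offset and contig == 0 allows gaps
 * between the counted extents; the helper and the 1MB bound are
 * hypothetical.
 */
static inline u64 example_delalloc_bytes_in_first_mb(struct extent_io_tree *tree)
{
        u64 found_start = 0;

        return count_range_bits(tree, &found_start, 1024 * 1024 - 1,
                                (u64)-1, EXTENT_DELALLOC, 0);
}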
1494 * set the private field for a given byte offset in the tree. If there isn't
1495 * an extent_state there already, this does nothing.
1497 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1499 struct rb_node *node;
1500 struct extent_state *state;
1503 spin_lock(&tree->lock);
1505 * this search will find all the extents that end after
1508 node = tree_search(tree, start);
1513 state = rb_entry(node, struct extent_state, rb_node);
1514 if (state->start != start) {
1518 state->private = private;
1520 spin_unlock(&tree->lock);
1524 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1526 struct rb_node *node;
1527 struct extent_state *state;
1530 spin_lock(&tree->lock);
1532 * this search will find all the extents that end after
1535 node = tree_search(tree, start);
1540 state = rb_entry(node, struct extent_state, rb_node);
1541 if (state->start != start) {
1545 *private = state->private;
1547 spin_unlock(&tree->lock);
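/*
 * Editorial sketch, not part of the original file: the private field is an
 * opaque u64 keyed by the start offset of a state record, and it can only
 * be set if a record begins exactly at that offset (btrfs stashes expected
 * checksums this way for the read end_io path).  The round-trip helper
 * below is hypothetical.
 */
static inline int example_state_private_roundtrip(struct extent_io_tree *tree,
                                                  u64 start, u64 value)
{
        u64 readback = 0;
        int ret;

        ret = set_state_private(tree, start, value);
        if (ret)
                return ret;
        ret = get_state_private(tree, start, &readback);
        if (ret)
                return ret;
        return readback == value ? 0 : -EIO;
}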
1552 * searches a range in the state tree for a given mask.
1553 * If 'filled' == 1, this returns 1 only if every extent in the range
1554 * has the bits set. Otherwise, 1 is returned if any bit in the
1555 * range is found set.
1557 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1558 int bits, int filled, struct extent_state *cached)
1560 struct extent_state *state = NULL;
1561 struct rb_node *node;
1564 spin_lock(&tree->lock);
1565 if (cached && cached->tree && cached->start == start)
1566 node = &cached->rb_node;
1568 node = tree_search(tree, start);
1569 while (node && start <= end) {
1570 state = rb_entry(node, struct extent_state, rb_node);
1572 if (filled && state->start > start) {
1577 if (state->start > end)
1580 if (state->state & bits) {
1584 } else if (filled) {
1589 if (state->end == (u64)-1)
1592 start = state->end + 1;
1595 node = rb_next(node);
1602 spin_unlock(&tree->lock);
1607 * helper function to set a given page up to date if all the
1608 * extents in the tree for that page are up to date
1610 static int check_page_uptodate(struct extent_io_tree *tree,
1613 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1614 u64 end = start + PAGE_CACHE_SIZE - 1;
1615 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1616 SetPageUptodate(page);
1621 * helper function to unlock a page if all the extents in the tree
1622 * for that page are unlocked
1624 static int check_page_locked(struct extent_io_tree *tree,
1627 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1628 u64 end = start + PAGE_CACHE_SIZE - 1;
1629 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
1635 * helper function to end page writeback if all the extents
1636 * in the tree for that page are done with writeback
1638 static int check_page_writeback(struct extent_io_tree *tree,
1641 end_page_writeback(page);
1645 /* lots and lots of room for performance fixes in the end_bio funcs */
1648 * after a writepage IO is done, we need to:
1649 * clear the uptodate bits on error
1650 * clear the writeback bits in the extent tree for this IO
1651 * end_page_writeback if the page has no more pending IO
1653 * Scheduling is not allowed, so the extent state tree is expected
1654 * to have one and only one object corresponding to this IO.
1656 static void end_bio_extent_writepage(struct bio *bio, int err)
1658 int uptodate = err == 0;
1659 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1660 struct extent_io_tree *tree;
1667 struct page *page = bvec->bv_page;
1668 tree = &BTRFS_I(page->mapping->host)->io_tree;
1670 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1672 end = start + bvec->bv_len - 1;
1674 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1679 if (--bvec >= bio->bi_io_vec)
1680 prefetchw(&bvec->bv_page->flags);
1681 if (tree->ops && tree->ops->writepage_end_io_hook) {
1682 ret = tree->ops->writepage_end_io_hook(page, start,
1683 end, NULL, uptodate);
1688 if (!uptodate && tree->ops &&
1689 tree->ops->writepage_io_failed_hook) {
1690 ret = tree->ops->writepage_io_failed_hook(bio, page,
1693 uptodate = (err == 0);
1699 clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
1700 ClearPageUptodate(page);
1705 end_page_writeback(page);
1707 check_page_writeback(tree, page);
1708 } while (bvec >= bio->bi_io_vec);
1714 * after a readpage IO is done, we need to:
1715 * clear the uptodate bits on error
1716 * set the uptodate bits if things worked
1717 * set the page up to date if all extents in the tree are uptodate
1718 * clear the lock bit in the extent tree
1719 * unlock the page if there are no other extents locked for it
1721 * Scheduling is not allowed, so the extent state tree is expected
1722 * to have one and only one object corresponding to this IO.
1724 static void end_bio_extent_readpage(struct bio *bio, int err)
1726 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1727 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
1728 struct bio_vec *bvec = bio->bi_io_vec;
1729 struct extent_io_tree *tree;
1739 struct page *page = bvec->bv_page;
1740 struct extent_state *cached = NULL;
1741 struct extent_state *state;
1743 tree = &BTRFS_I(page->mapping->host)->io_tree;
1745 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1747 end = start + bvec->bv_len - 1;
1749 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1754 if (++bvec <= bvec_end)
1755 prefetchw(&bvec->bv_page->flags);
1757 spin_lock(&tree->lock);
1758 state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
1759 if (state && state->start == start) {
1761 * take a reference on the state, unlock will drop the ref
1764 cache_state(state, &cached);
1766 spin_unlock(&tree->lock);
1768 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1769 ret = tree->ops->readpage_end_io_hook(page, start, end,
1774 if (!uptodate && tree->ops &&
1775 tree->ops->readpage_io_failed_hook) {
1776 ret = tree->ops->readpage_io_failed_hook(bio, page,
1780 test_bit(BIO_UPTODATE, &bio->bi_flags);
1783 uncache_state(&cached);
1789 set_extent_uptodate(tree, start, end, &cached,
1792 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
1796 SetPageUptodate(page);
1798 ClearPageUptodate(page);
1804 check_page_uptodate(tree, page);
1806 ClearPageUptodate(page);
1809 check_page_locked(tree, page);
1811 } while (bvec <= bvec_end);
1817 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1822 bio = bio_alloc(gfp_flags, nr_vecs);
1824 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1825 while (!bio && (nr_vecs /= 2))
1826 bio = bio_alloc(gfp_flags, nr_vecs);
1831 bio->bi_bdev = bdev;
1832 bio->bi_sector = first_sector;
1837 static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
1838 unsigned long bio_flags)
1841 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1842 struct page *page = bvec->bv_page;
1843 struct extent_io_tree *tree = bio->bi_private;
1846 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1848 bio->bi_private = NULL;
1852 if (tree->ops && tree->ops->submit_bio_hook)
1853 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
1854 mirror_num, bio_flags, start);
1856 submit_bio(rw, bio);
1857 if (bio_flagged(bio, BIO_EOPNOTSUPP))
1863 static int submit_extent_page(int rw, struct extent_io_tree *tree,
1864 struct page *page, sector_t sector,
1865 size_t size, unsigned long offset,
1866 struct block_device *bdev,
1867 struct bio **bio_ret,
1868 unsigned long max_pages,
1869 bio_end_io_t end_io_func,
1871 unsigned long prev_bio_flags,
1872 unsigned long bio_flags)
1878 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
1879 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
1880 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
1882 if (bio_ret && *bio_ret) {
1885 contig = bio->bi_sector == sector;
1887 contig = bio->bi_sector + (bio->bi_size >> 9) ==
1890 if (prev_bio_flags != bio_flags || !contig ||
1891 (tree->ops && tree->ops->merge_bio_hook &&
1892 tree->ops->merge_bio_hook(page, offset, page_size, bio,
1894 bio_add_page(bio, page, page_size, offset) < page_size) {
1895 ret = submit_one_bio(rw, bio, mirror_num,
1902 if (this_compressed)
1905 nr = bio_get_nr_vecs(bdev);
1907 bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1911 bio_add_page(bio, page, page_size, offset);
1912 bio->bi_end_io = end_io_func;
1913 bio->bi_private = tree;
1918 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
1923 void set_page_extent_mapped(struct page *page)
1925 if (!PagePrivate(page)) {
1926 SetPagePrivate(page);
1927 page_cache_get(page);
1928 set_page_private(page, EXTENT_PAGE_PRIVATE);
1932 static void set_page_extent_head(struct page *page, unsigned long len)
1934 WARN_ON(!PagePrivate(page));
1935 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1939 * basic readpage implementation. Locked extent state structs are inserted
1940 * into the tree that are removed when the IO is done (by the end_io handlers)
1943 static int __extent_read_full_page(struct extent_io_tree *tree,
1945 get_extent_t *get_extent,
1946 struct bio **bio, int mirror_num,
1947 unsigned long *bio_flags)
1949 struct inode *inode = page->mapping->host;
1950 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1951 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1955 u64 last_byte = i_size_read(inode);
1959 struct extent_map *em;
1960 struct block_device *bdev;
1961 struct btrfs_ordered_extent *ordered;
1964 size_t pg_offset = 0;
1966 size_t disk_io_size;
1967 size_t blocksize = inode->i_sb->s_blocksize;
1968 unsigned long this_bio_flag = 0;
1970 set_page_extent_mapped(page);
1974 lock_extent(tree, start, end, GFP_NOFS);
1975 ordered = btrfs_lookup_ordered_extent(inode, start);
1978 unlock_extent(tree, start, end, GFP_NOFS);
1979 btrfs_start_ordered_extent(inode, ordered, 1);
1980 btrfs_put_ordered_extent(ordered);
1983 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
1985 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
1988 iosize = PAGE_CACHE_SIZE - zero_offset;
1989 userpage = kmap_atomic(page, KM_USER0);
1990 memset(userpage + zero_offset, 0, iosize);
1991 flush_dcache_page(page);
1992 kunmap_atomic(userpage, KM_USER0);
1995 while (cur <= end) {
1996 if (cur >= last_byte) {
1998 struct extent_state *cached = NULL;
2000 iosize = PAGE_CACHE_SIZE - pg_offset;
2001 userpage = kmap_atomic(page, KM_USER0);
2002 memset(userpage + pg_offset, 0, iosize);
2003 flush_dcache_page(page);
2004 kunmap_atomic(userpage, KM_USER0);
2005 set_extent_uptodate(tree, cur, cur + iosize - 1,
2007 unlock_extent_cached(tree, cur, cur + iosize - 1,
2011 em = get_extent(inode, page, pg_offset, cur,
2013 if (IS_ERR_OR_NULL(em)) {
2015 unlock_extent(tree, cur, end, GFP_NOFS);
2018 extent_offset = cur - em->start;
2019 BUG_ON(extent_map_end(em) <= cur);
2022 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2023 this_bio_flag = EXTENT_BIO_COMPRESSED;
2024 extent_set_compress_type(&this_bio_flag,
2028 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2029 cur_end = min(extent_map_end(em) - 1, end);
2030 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2031 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2032 disk_io_size = em->block_len;
2033 sector = em->block_start >> 9;
2035 sector = (em->block_start + extent_offset) >> 9;
2036 disk_io_size = iosize;
2039 block_start = em->block_start;
2040 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2041 block_start = EXTENT_MAP_HOLE;
2042 free_extent_map(em);
2045 /* we've found a hole, just zero and go on */
2046 if (block_start == EXTENT_MAP_HOLE) {
2048 struct extent_state *cached = NULL;
2050 userpage = kmap_atomic(page, KM_USER0);
2051 memset(userpage + pg_offset, 0, iosize);
2052 flush_dcache_page(page);
2053 kunmap_atomic(userpage, KM_USER0);
2055 set_extent_uptodate(tree, cur, cur + iosize - 1,
2057 unlock_extent_cached(tree, cur, cur + iosize - 1,
2060 pg_offset += iosize;
2063 /* the get_extent function already copied into the page */
2064 if (test_range_bit(tree, cur, cur_end,
2065 EXTENT_UPTODATE, 1, NULL)) {
2066 check_page_uptodate(tree, page);
2067 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2069 pg_offset += iosize;
2072 /* we have an inline extent but it didn't get marked up
2073 * to date. Error out
2075 if (block_start == EXTENT_MAP_INLINE) {
2077 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2079 pg_offset += iosize;
2084 if (tree->ops && tree->ops->readpage_io_hook) {
2085 ret = tree->ops->readpage_io_hook(page, cur,
2089 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2091 ret = submit_extent_page(READ, tree, page,
2092 sector, disk_io_size, pg_offset,
2094 end_bio_extent_readpage, mirror_num,
2098 *bio_flags = this_bio_flag;
2103 pg_offset += iosize;
2106 if (!PageError(page))
2107 SetPageUptodate(page);
2113 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2114 get_extent_t *get_extent)
2116 struct bio *bio = NULL;
2117 unsigned long bio_flags = 0;
2120 ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
2123 ret = submit_one_bio(READ, bio, 0, bio_flags);
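/*
 * Editorial sketch, not part of the original file: a ->readpage
 * implementation normally just resolves the inode's io tree and calls
 * extent_read_full_page() with the filesystem's own get_extent_t.  The
 * callback is passed in here so the sketch stays self-contained; the
 * helper itself is hypothetical.
 */
static inline int example_readpage(struct page *page,
                                   get_extent_t *fs_get_extent)
{
        struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree;

        return extent_read_full_page(tree, page, fs_get_extent);
}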
2127 static noinline void update_nr_written(struct page *page,
2128 struct writeback_control *wbc,
2129 unsigned long nr_written)
2131 wbc->nr_to_write -= nr_written;
2132 if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2133 wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2134 page->mapping->writeback_index = page->index + nr_written;
2138 * the writepage semantics are similar to regular writepage. extent
2139 * records are inserted to lock ranges in the tree, and as dirty areas
2140 * are found, they are marked writeback. Then the lock bits are removed
2141 * and the end_io handler clears the writeback ranges
2143 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2146 struct inode *inode = page->mapping->host;
2147 struct extent_page_data *epd = data;
2148 struct extent_io_tree *tree = epd->tree;
2149 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2151 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2155 u64 last_byte = i_size_read(inode);
2159 struct extent_state *cached_state = NULL;
2160 struct extent_map *em;
2161 struct block_device *bdev;
2164 size_t pg_offset = 0;
2166 loff_t i_size = i_size_read(inode);
2167 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2173 unsigned long nr_written = 0;
2175 if (wbc->sync_mode == WB_SYNC_ALL)
2176 write_flags = WRITE_SYNC;
2178 write_flags = WRITE;
2180 trace___extent_writepage(page, inode, wbc);
2182 WARN_ON(!PageLocked(page));
2183 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2184 if (page->index > end_index ||
2185 (page->index == end_index && !pg_offset)) {
2186 page->mapping->a_ops->invalidatepage(page, 0);
2191 if (page->index == end_index) {
2194 userpage = kmap_atomic(page, KM_USER0);
2195 memset(userpage + pg_offset, 0,
2196 PAGE_CACHE_SIZE - pg_offset);
2197 kunmap_atomic(userpage, KM_USER0);
2198 flush_dcache_page(page);
2202 set_page_extent_mapped(page);
2204 delalloc_start = start;
2207 if (!epd->extent_locked) {
2208 u64 delalloc_to_write = 0;
2210 * make sure the wbc mapping index is at least updated
2213 update_nr_written(page, wbc, 0);
2215 while (delalloc_end < page_end) {
2216 nr_delalloc = find_lock_delalloc_range(inode, tree,
2221 if (nr_delalloc == 0) {
2222 delalloc_start = delalloc_end + 1;
2225 tree->ops->fill_delalloc(inode, page, delalloc_start,
2226 delalloc_end, &page_started,
2229 * delalloc_end is already one less than the total
2230 * length, so we don't subtract one from PAGE_CACHE_SIZE
2233 delalloc_to_write += (delalloc_end - delalloc_start +
2236 delalloc_start = delalloc_end + 1;
2238 if (wbc->nr_to_write < delalloc_to_write) {
2241 if (delalloc_to_write < thresh * 2)
2242 thresh = delalloc_to_write;
2243 wbc->nr_to_write = min_t(u64, delalloc_to_write,
2247 /* did the fill delalloc function already unlock and start the IO?
2253 * we've unlocked the page, so we can't update
2254 * the mapping's writeback index, just update nr_to_write
2257 wbc->nr_to_write -= nr_written;
2261 if (tree->ops && tree->ops->writepage_start_hook) {
2262 ret = tree->ops->writepage_start_hook(page, start,
2264 if (ret == -EAGAIN) {
2265 redirty_page_for_writepage(wbc, page);
2266 update_nr_written(page, wbc, nr_written);
2274 * we don't want to touch the inode after unlocking the page,
2275 * so we update the mapping writeback index now
2277 update_nr_written(page, wbc, nr_written + 1);
2280 if (last_byte <= start) {
2281 if (tree->ops && tree->ops->writepage_end_io_hook)
2282 tree->ops->writepage_end_io_hook(page, start,
2287 blocksize = inode->i_sb->s_blocksize;
2289 while (cur <= end) {
2290 if (cur >= last_byte) {
2291 if (tree->ops && tree->ops->writepage_end_io_hook)
2292 tree->ops->writepage_end_io_hook(page, cur,
2296 em = epd->get_extent(inode, page, pg_offset, cur,
2298 if (IS_ERR_OR_NULL(em)) {
2303 extent_offset = cur - em->start;
2304 BUG_ON(extent_map_end(em) <= cur);
2306 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2307 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2308 sector = (em->block_start + extent_offset) >> 9;
2310 block_start = em->block_start;
2311 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2312 free_extent_map(em);
2316 * compressed and inline extents are written through other paths in the FS
2319 if (compressed || block_start == EXTENT_MAP_HOLE ||
2320 block_start == EXTENT_MAP_INLINE) {
2322 * end_io notification does not happen here for
2323 * compressed extents
2325 if (!compressed && tree->ops &&
2326 tree->ops->writepage_end_io_hook)
2327 tree->ops->writepage_end_io_hook(page, cur,
2330 else if (compressed) {
2331 /* we don't want to end_page_writeback on
2332 * a compressed extent. this happens
2339 pg_offset += iosize;
2342 /* leave this out until we have a page_mkwrite call */
2343 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2344 EXTENT_DIRTY, 0, NULL)) {
2346 pg_offset += iosize;
2350 if (tree->ops && tree->ops->writepage_io_hook) {
2351 ret = tree->ops->writepage_io_hook(page, cur,
2359 unsigned long max_nr = end_index + 1;
2361 set_range_writeback(tree, cur, cur + iosize - 1);
2362 if (!PageWriteback(page)) {
2363 printk(KERN_ERR "btrfs warning page %lu not "
2364 "writeback, cur %llu end %llu\n",
2365 page->index, (unsigned long long)cur,
2366 (unsigned long long)end);
2369 ret = submit_extent_page(write_flags, tree, page,
2370 sector, iosize, pg_offset,
2371 bdev, &epd->bio, max_nr,
2372 end_bio_extent_writepage,
2378 pg_offset += iosize;
2383 /* make sure the mapping tag for page dirty gets cleared */
2384 set_page_writeback(page);
2385 end_page_writeback(page);
2391 /* drop our reference on any cached states */
2392 free_extent_state(cached_state);
2397 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2398 * @mapping: address space structure to write
2399 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2400 * @writepage: function called for each page
2401 * @data: data passed to writepage function
2403 * If a page is already under I/O, write_cache_pages() skips it, even
2404 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2405 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2406 * and msync() need to guarantee that all the data which was dirty at the time
2407 * the call was made get new I/O started against them. If wbc->sync_mode is
2408 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2409 * existing IO to complete.
2411 static int extent_write_cache_pages(struct extent_io_tree *tree,
2412 struct address_space *mapping,
2413 struct writeback_control *wbc,
2414 writepage_t writepage, void *data,
2415 void (*flush_fn)(void *))
2419 int nr_to_write_done = 0;
2420 struct pagevec pvec;
2423 pgoff_t end; /* Inclusive */
2426 pagevec_init(&pvec, 0);
2427 if (wbc->range_cyclic) {
2428 index = mapping->writeback_index; /* Start from prev offset */
2431 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2432 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2436 while (!done && !nr_to_write_done && (index <= end) &&
2437 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2438 PAGECACHE_TAG_DIRTY, min(end - index,
2439 (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2443 for (i = 0; i < nr_pages; i++) {
2444 struct page *page = pvec.pages[i];
2447 * At this point we hold neither mapping->tree_lock nor
2448 * lock on the page itself: the page may be truncated or
2449 * invalidated (changing page->mapping to NULL), or even
2450 * swizzled back from swapper_space to tmpfs file
2453 if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2454 tree->ops->write_cache_pages_lock_hook(page);
2458 if (unlikely(page->mapping != mapping)) {
2463 if (!wbc->range_cyclic && page->index > end) {
2469 if (wbc->sync_mode != WB_SYNC_NONE) {
2470 if (PageWriteback(page))
2472 wait_on_page_writeback(page);
2475 if (PageWriteback(page) ||
2476 !clear_page_dirty_for_io(page)) {
2481 ret = (*writepage)(page, wbc, data);
2483 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2491 * the filesystem may choose to bump up nr_to_write.
2492 * We have to make sure to honor the new nr_to_write
2495 nr_to_write_done = wbc->nr_to_write <= 0;
2497 pagevec_release(&pvec);
2500 if (!scanned && !done) {
2502 * We hit the last page and there is more work to be done: wrap
2503 * back to the start of the file
2512 static void flush_epd_write_bio(struct extent_page_data *epd)
2516 submit_one_bio(WRITE_SYNC, epd->bio, 0, 0);
2518 submit_one_bio(WRITE, epd->bio, 0, 0);
2523 static noinline void flush_write_bio(void *data)
2525 struct extent_page_data *epd = data;
2526 flush_epd_write_bio(epd);
2529 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2530 get_extent_t *get_extent,
2531 struct writeback_control *wbc)
2534 struct address_space *mapping = page->mapping;
2535 struct extent_page_data epd = {
2538 .get_extent = get_extent,
2540 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
2542 struct writeback_control wbc_writepages = {
2543 .sync_mode = wbc->sync_mode,
2544 .older_than_this = NULL,
2546 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
2547 .range_end = (loff_t)-1,
2550 ret = __extent_writepage(page, wbc, &epd);
2552 extent_write_cache_pages(tree, mapping, &wbc_writepages,
2553 __extent_writepage, &epd, flush_write_bio);
2554 flush_epd_write_bio(&epd);
2558 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
2559 u64 start, u64 end, get_extent_t *get_extent,
2563 struct address_space *mapping = inode->i_mapping;
2565 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
2568 struct extent_page_data epd = {
2571 .get_extent = get_extent,
2573 .sync_io = mode == WB_SYNC_ALL,
2575 struct writeback_control wbc_writepages = {
2577 .older_than_this = NULL,
2578 .nr_to_write = nr_pages * 2,
2579 .range_start = start,
2580 .range_end = end + 1,
2583 while (start <= end) {
2584 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
2585 if (clear_page_dirty_for_io(page))
2586 ret = __extent_writepage(page, &wbc_writepages, &epd);
2588 if (tree->ops && tree->ops->writepage_end_io_hook)
2589 tree->ops->writepage_end_io_hook(page, start,
2590 start + PAGE_CACHE_SIZE - 1,
2594 page_cache_release(page);
2595 start += PAGE_CACHE_SIZE;
2598 flush_epd_write_bio(&epd);
2602 int extent_writepages(struct extent_io_tree *tree,
2603 struct address_space *mapping,
2604 get_extent_t *get_extent,
2605 struct writeback_control *wbc)
2608 struct extent_page_data epd = {
2611 .get_extent = get_extent,
2613 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
2616 ret = extent_write_cache_pages(tree, mapping, wbc,
2617 __extent_writepage, &epd,
2619 flush_epd_write_bio(&epd);
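/*
 * Editorial sketch, not part of the original file: ->writepages support is
 * typically a thin wrapper that hands the inode's io tree, the mapping and
 * the filesystem's get_extent_t to extent_writepages().  The callback
 * parameter and the helper name are hypothetical.
 */
static inline int example_writepages(struct address_space *mapping,
                                     struct writeback_control *wbc,
                                     get_extent_t *fs_get_extent)
{
        struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;

        return extent_writepages(tree, mapping, fs_get_extent, wbc);
}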
2623 int extent_readpages(struct extent_io_tree *tree,
2624 struct address_space *mapping,
2625 struct list_head *pages, unsigned nr_pages,
2626 get_extent_t get_extent)
2628 struct bio *bio = NULL;
2630 unsigned long bio_flags = 0;
2632 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2633 struct page *page = list_entry(pages->prev, struct page, lru);
2635 prefetchw(&page->flags);
2636 list_del(&page->lru);
2637 if (!add_to_page_cache_lru(page, mapping,
2638 page->index, GFP_NOFS)) {
2639 __extent_read_full_page(tree, page, get_extent,
2640 &bio, 0, &bio_flags);
2642 page_cache_release(page);
2644 BUG_ON(!list_empty(pages));
2646 submit_one_bio(READ, bio, 0, bio_flags);
2651 * basic invalidatepage code, this waits on any locked or writeback
2652 * ranges corresponding to the page, and then deletes any extent state
2653 * records from the tree
2655 int extent_invalidatepage(struct extent_io_tree *tree,
2656 struct page *page, unsigned long offset)
2658 struct extent_state *cached_state = NULL;
2659 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2660 u64 end = start + PAGE_CACHE_SIZE - 1;
2661 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2663 start += (offset + blocksize - 1) & ~(blocksize - 1);
2667 lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
2668 wait_on_page_writeback(page);
2669 clear_extent_bit(tree, start, end,
2670 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
2671 EXTENT_DO_ACCOUNTING,
2672 1, 1, &cached_state, GFP_NOFS);
2677 * a helper for releasepage, this tests for areas of the page that
2678 * are locked or under IO and drops the related state bits if it is safe
2681 int try_release_extent_state(struct extent_map_tree *map,
2682 struct extent_io_tree *tree, struct page *page,
2685 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2686 u64 end = start + PAGE_CACHE_SIZE - 1;
2689 if (test_range_bit(tree, start, end,
2690 EXTENT_IOBITS, 0, NULL))
2693 if ((mask & GFP_NOFS) == GFP_NOFS)
2696 * at this point we can safely clear everything except the
2697 * locked bit and the nodatasum bit
2699 ret = clear_extent_bit(tree, start, end,
2700 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
2703 /* if clear_extent_bit failed for enomem reasons,
2704 * we can't allow the release to continue.
2715 * a helper for releasepage. As long as there are no locked extents
2716 * in the range corresponding to the page, both state records and extent
2717 * map records are removed
2719 int try_release_extent_mapping(struct extent_map_tree *map,
2720 struct extent_io_tree *tree, struct page *page,
2723 struct extent_map *em;
2724 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2725 u64 end = start + PAGE_CACHE_SIZE - 1;
2727 if ((mask & __GFP_WAIT) &&
2728 page->mapping->host->i_size > 16 * 1024 * 1024) {
2730 while (start <= end) {
2731 len = end - start + 1;
2732 write_lock(&map->lock);
2733 em = lookup_extent_mapping(map, start, len);
2734 if (IS_ERR_OR_NULL(em)) {
2735 write_unlock(&map->lock);
2738 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2739 em->start != start) {
2740 write_unlock(&map->lock);
2741 free_extent_map(em);
2744 if (!test_range_bit(tree, em->start,
2745 extent_map_end(em) - 1,
2746 EXTENT_LOCKED | EXTENT_WRITEBACK,
2748 remove_extent_mapping(map, em);
2749 /* once for the rb tree */
2750 free_extent_map(em);
2752 start = extent_map_end(em);
2753 write_unlock(&map->lock);
2756 free_extent_map(em);
2759 return try_release_extent_state(map, tree, page, mask);
2763 * helper function for fiemap, which doesn't want to see any holes.
2764 * This maps until we find something past 'last'
2766 static struct extent_map *get_extent_skip_holes(struct inode *inode,
2769 get_extent_t *get_extent)
2771 u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
2772 struct extent_map *em;
2779 len = last - offset;
2782 len = (len + sectorsize - 1) & ~(sectorsize - 1);
2783 em = get_extent(inode, NULL, 0, offset, len, 0);
2784 if (IS_ERR_OR_NULL(em))
2787 /* if this isn't a hole, return it */
2788 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
2789 em->block_start != EXTENT_MAP_HOLE) {
2793 /* this is a hole, advance to the next extent */
2794 offset = extent_map_end(em);
2795 free_extent_map(em);
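/*
 * fiemap implementation: find the last file extent item to learn where
 * the file really ends, lock the range, then walk the extent maps with
 * get_extent_skip_holes() and report each one to user space through
 * fiemap_fill_next_extent(), translating inline/delalloc/compressed
 * extents into the matching FIEMAP_EXTENT_* flags
 */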
2802 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2803 __u64 start, __u64 len, get_extent_t *get_extent)
2807 u64 max = start + len;
2811 u64 last_for_get_extent = 0;
2813 u64 isize = i_size_read(inode);
2814 struct btrfs_key found_key;
2815 struct extent_map *em = NULL;
2816 struct extent_state *cached_state = NULL;
2817 struct btrfs_path *path;
2818 struct btrfs_file_extent_item *item;
2823 unsigned long emflags;
2828 path = btrfs_alloc_path();
2831 path->leave_spinning = 1;
2834 * lookup the last file extent. We're not using i_size here
2835 * because there might be preallocation past i_size
2837 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
2838 path, btrfs_ino(inode), -1, 0);
2840 btrfs_free_path(path);
2845 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2846 struct btrfs_file_extent_item);
2847 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
2848 found_type = btrfs_key_type(&found_key);
2850 /* No extents, but there might be delalloc bits */
2851 if (found_key.objectid != btrfs_ino(inode) ||
2852 found_type != BTRFS_EXTENT_DATA_KEY) {
2853 /* have to trust i_size as the end */
2855 last_for_get_extent = isize;
2858 * remember the start of the last extent. There are a
2859 * bunch of different factors that go into the length of the
2860 * extent, so it's much less complex to remember where it started
2862 last = found_key.offset;
2863 last_for_get_extent = last + 1;
2865 btrfs_free_path(path);
2868 * we might have some extents allocated but more delalloc past those
2869 * extents. so, we trust isize unless the start of the last extent is beyond isize
2874 last_for_get_extent = isize;
2877 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
2878 &cached_state, GFP_NOFS);
2880 em = get_extent_skip_holes(inode, off, last_for_get_extent,
2890 u64 offset_in_extent;
2892 /* break if the extent we found is outside the range */
2893 if (em->start >= max || extent_map_end(em) < off)
2897 * get_extent may return an extent that starts before our
2898 * requested range. We have to make sure the ranges
2899 * we return to fiemap always move forward and don't
2900 * overlap, so adjust the offsets here
2902 em_start = max(em->start, off);
2905 * record the offset from the start of the extent
2906 * for adjusting the disk offset below
2908 offset_in_extent = em_start - em->start;
2909 em_end = extent_map_end(em);
2910 em_len = em_end - em_start;
2911 emflags = em->flags;
2916 * bump off for our next call to get_extent
2918 off = extent_map_end(em);
2922 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
2924 flags |= FIEMAP_EXTENT_LAST;
2925 } else if (em->block_start == EXTENT_MAP_INLINE) {
2926 flags |= (FIEMAP_EXTENT_DATA_INLINE |
2927 FIEMAP_EXTENT_NOT_ALIGNED);
2928 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
2929 flags |= (FIEMAP_EXTENT_DELALLOC |
2930 FIEMAP_EXTENT_UNKNOWN);
2932 disko = em->block_start + offset_in_extent;
2934 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
2935 flags |= FIEMAP_EXTENT_ENCODED;
2937 free_extent_map(em);
2939 if ((em_start >= last) || em_len == (u64)-1 ||
2940 (last == (u64)-1 && isize <= em_end)) {
2941 flags |= FIEMAP_EXTENT_LAST;
2945 /* now scan forward to see if this is really the last extent. */
2946 em = get_extent_skip_holes(inode, off, last_for_get_extent,
2953 flags |= FIEMAP_EXTENT_LAST;
2956 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
2962 free_extent_map(em);
2964 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
2965 &cached_state, GFP_NOFS);
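/*
 * return the i'th page backing an extent buffer.  Page 0 is cached in
 * eb->first_page; for the others the buffer-relative index is turned
 * into a file page index and looked up in the mapping's radix tree
 */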
2969 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2973 struct address_space *mapping;
2976 return eb->first_page;
2977 i += eb->start >> PAGE_CACHE_SHIFT;
2978 mapping = eb->first_page->mapping;
2983 * extent_buffer_page is only called after pinning the page
2984 * by increasing the reference count. So we know the page must
2985 * be in the radix tree.
2988 p = radix_tree_lookup(&mapping->page_tree, i);
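/*
 * number of pages touched by the byte range [start, start + len): round
 * the end of the range up to a page boundary and subtract the index of
 * the first page.  For example, with 4k pages a 4k buffer that starts
 * 1k into a page spans two pages
 */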
2994 static inline unsigned long num_extent_pages(u64 start, u64 len)
2996 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2997 (start >> PAGE_CACHE_SHIFT);
3000 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3005 struct extent_buffer *eb = NULL;
3007 unsigned long flags;
3010 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
3015 spin_lock_init(&eb->lock);
3016 init_waitqueue_head(&eb->lock_wq);
3019 spin_lock_irqsave(&leak_lock, flags);
3020 list_add(&eb->leak_list, &buffers);
3021 spin_unlock_irqrestore(&leak_lock, flags);
3023 atomic_set(&eb->refs, 1);
3028 static void __free_extent_buffer(struct extent_buffer *eb)
3031 unsigned long flags;
3032 spin_lock_irqsave(&leak_lock, flags);
3033 list_del(&eb->leak_list);
3034 spin_unlock_irqrestore(&leak_lock, flags);
3036 kmem_cache_free(extent_buffer_cache, eb);
3040 * Helper for releasing an extent buffer's pages, from the last page down to @start_idx.
3042 static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
3043 unsigned long start_idx)
3045 unsigned long index;
3048 if (!eb->first_page)
3051 index = num_extent_pages(eb->start, eb->len);
3052 if (start_idx >= index)
3057 page = extent_buffer_page(eb, index);
3059 page_cache_release(page);
3060 } while (index != start_idx);
3064 * Helper for releasing the extent buffer.
3066 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
3068 btrfs_release_extent_buffer_page(eb, 0);
3069 __free_extent_buffer(eb);
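/*
 * find or create the extent buffer for [start, start + len).  If one is
 * already in the tree's buffer radix tree we just take a reference and
 * return it; otherwise a new buffer is allocated, its pages are pinned
 * in the page cache, and it is inserted, coping with the race where
 * another thread inserts the same buffer first.  Callers drop their
 * reference with free_extent_buffer()
 */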
3072 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3073 u64 start, unsigned long len,
3076 unsigned long num_pages = num_extent_pages(start, len);
3078 unsigned long index = start >> PAGE_CACHE_SHIFT;
3079 struct extent_buffer *eb;
3080 struct extent_buffer *exists = NULL;
3082 struct address_space *mapping = tree->mapping;
3087 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3088 if (eb && atomic_inc_not_zero(&eb->refs)) {
3090 mark_page_accessed(eb->first_page);
3095 eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
3100 eb->first_page = page0;
3103 page_cache_get(page0);
3104 mark_page_accessed(page0);
3105 set_page_extent_mapped(page0);
3106 set_page_extent_head(page0, len);
3107 uptodate = PageUptodate(page0);
3111 for (; i < num_pages; i++, index++) {
3112 p = find_or_create_page(mapping, index, GFP_NOFS | __GFP_HIGHMEM);
3117 set_page_extent_mapped(p);
3118 mark_page_accessed(p);
3121 set_page_extent_head(p, len);
3123 set_page_private(p, EXTENT_PAGE_PRIVATE);
3125 if (!PageUptodate(p))
3129 * see below about how we avoid a nasty race with release page
3130 * and why we unlock later
3136 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3138 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
3142 spin_lock(&tree->buffer_lock);
3143 ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
3144 if (ret == -EEXIST) {
3145 exists = radix_tree_lookup(&tree->buffer,
3146 start >> PAGE_CACHE_SHIFT);
3147 /* add one reference for the caller */
3148 atomic_inc(&exists->refs);
3149 spin_unlock(&tree->buffer_lock);
3150 radix_tree_preload_end();
3153 /* add one reference for the tree */
3154 atomic_inc(&eb->refs);
3155 spin_unlock(&tree->buffer_lock);
3156 radix_tree_preload_end();
3159 * there is a race where release page may have
3160 * tried to find this extent buffer in the radix
3161 * but failed. It will tell the VM it is safe to
3162 * reclaim the page, and it will clear the page private bit.
3163 * We must make sure to set the page private bit properly
3164 * after the extent buffer is in the radix tree so
3165 * it doesn't get lost
3167 set_page_extent_mapped(eb->first_page);
3168 set_page_extent_head(eb->first_page, eb->len);
3170 unlock_page(eb->first_page);
3174 if (eb->first_page && !page0)
3175 unlock_page(eb->first_page);
3177 if (!atomic_dec_and_test(&eb->refs))
3179 btrfs_release_extent_buffer(eb);
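/*
 * look up an existing extent buffer without creating one.  Takes a
 * reference on the buffer if it is found and still live, otherwise
 * NULL is returned
 */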
3183 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
3184 u64 start, unsigned long len)
3186 struct extent_buffer *eb;
3189 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3190 if (eb && atomic_inc_not_zero(&eb->refs)) {
3192 mark_page_accessed(eb->first_page);
3200 void free_extent_buffer(struct extent_buffer *eb)
3205 if (!atomic_dec_and_test(&eb->refs))
3211 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
3212 struct extent_buffer *eb)
3215 unsigned long num_pages;
3218 num_pages = num_extent_pages(eb->start, eb->len);
3220 for (i = 0; i < num_pages; i++) {
3221 page = extent_buffer_page(eb, i);
3222 if (!PageDirty(page))
3226 WARN_ON(!PagePrivate(page));
3228 set_page_extent_mapped(page);
3230 set_page_extent_head(page, eb->len);
3232 clear_page_dirty_for_io(page);
3233 spin_lock_irq(&page->mapping->tree_lock);
3234 if (!PageDirty(page)) {
3235 radix_tree_tag_clear(&page->mapping->page_tree,
3237 PAGECACHE_TAG_DIRTY);
3239 spin_unlock_irq(&page->mapping->tree_lock);
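/*
 * mark every page of the extent buffer dirty and set the buffer-wide
 * EXTENT_BUFFER_DIRTY bit, remembering whether it was already dirty
 */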
3245 int set_extent_buffer_dirty(struct extent_io_tree *tree,
3246 struct extent_buffer *eb)
3249 unsigned long num_pages;
3252 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3253 num_pages = num_extent_pages(eb->start, eb->len);
3254 for (i = 0; i < num_pages; i++)
3255 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
3259 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3260 struct extent_buffer *eb,
3261 struct extent_state **cached_state)
3265 unsigned long num_pages;
3267 num_pages = num_extent_pages(eb->start, eb->len);
3268 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3270 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3271 cached_state, GFP_NOFS);
3272 for (i = 0; i < num_pages; i++) {
3273 page = extent_buffer_page(eb, i);
3275 ClearPageUptodate(page);
3280 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3281 struct extent_buffer *eb)
3285 unsigned long num_pages;
3287 num_pages = num_extent_pages(eb->start, eb->len);
3289 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3291 for (i = 0; i < num_pages; i++) {
3292 page = extent_buffer_page(eb, i);
3293 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3294 ((i == num_pages - 1) &&
3295 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3296 check_page_uptodate(tree, page);
3299 SetPageUptodate(page);
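/*
 * check whether the whole byte range [start, end] is up to date, either
 * because EXTENT_UPTODATE is set on the range in the tree or because
 * every page backing the range is PageUptodate
 */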
3304 int extent_range_uptodate(struct extent_io_tree *tree,
3309 int pg_uptodate = 1;
3311 unsigned long index;
3313 ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL);
3316 while (start <= end) {
3317 index = start >> PAGE_CACHE_SHIFT;
3318 page = find_get_page(tree->mapping, index);
3319 uptodate = PageUptodate(page);
3320 page_cache_release(page);
3325 start += PAGE_CACHE_SIZE;
3330 int extent_buffer_uptodate(struct extent_io_tree *tree,
3331 struct extent_buffer *eb,
3332 struct extent_state *cached_state)
3335 unsigned long num_pages;
3338 int pg_uptodate = 1;
3340 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3343 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3344 EXTENT_UPTODATE, 1, cached_state);
3348 num_pages = num_extent_pages(eb->start, eb->len);
3349 for (i = 0; i < num_pages; i++) {
3350 page = extent_buffer_page(eb, i);
3351 if (!PageUptodate(page)) {
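/*
 * read the pages of an extent buffer in from disk unless they are
 * already up to date.  Pages are locked, reads are issued for the ones
 * that need it (merged into a shared bio where possible), and if 'wait'
 * is set we block until the reads finish and then mark the buffer
 * EXTENT_BUFFER_UPTODATE
 */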
3359 int read_extent_buffer_pages(struct extent_io_tree *tree,
3360 struct extent_buffer *eb,
3361 u64 start, int wait,
3362 get_extent_t *get_extent, int mirror_num)
3365 unsigned long start_i;
3369 int locked_pages = 0;
3370 int all_uptodate = 1;
3371 int inc_all_pages = 0;
3372 unsigned long num_pages;
3373 struct bio *bio = NULL;
3374 unsigned long bio_flags = 0;
3376 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3379 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3380 EXTENT_UPTODATE, 1, NULL)) {
3385 WARN_ON(start < eb->start);
3386 start_i = (start >> PAGE_CACHE_SHIFT) -
3387 (eb->start >> PAGE_CACHE_SHIFT);
3392 num_pages = num_extent_pages(eb->start, eb->len);
3393 for (i = start_i; i < num_pages; i++) {
3394 page = extent_buffer_page(eb, i);
3396 if (!trylock_page(page))
3402 if (!PageUptodate(page))
3407 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3411 for (i = start_i; i < num_pages; i++) {
3412 page = extent_buffer_page(eb, i);
3414 WARN_ON(!PagePrivate(page));
3416 set_page_extent_mapped(page);
3418 set_page_extent_head(page, eb->len);
3421 page_cache_get(page);
3422 if (!PageUptodate(page)) {
3425 ClearPageError(page);
3426 err = __extent_read_full_page(tree, page,
3428 mirror_num, &bio_flags);
3437 submit_one_bio(READ, bio, mirror_num, bio_flags);
3442 for (i = start_i; i < num_pages; i++) {
3443 page = extent_buffer_page(eb, i);
3444 wait_on_page_locked(page);
3445 if (!PageUptodate(page))
3450 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3455 while (locked_pages > 0) {
3456 page = extent_buffer_page(eb, i);
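/*
 * copy 'len' bytes starting at byte offset 'start' of the extent buffer
 * into 'dstv', one page at a time through short-lived kmap_atomic()
 * mappings.  A typical (purely illustrative) use is pulling an on-disk
 * structure out of a metadata block, e.g.:
 *
 *	struct btrfs_disk_key disk_key;
 *	read_extent_buffer(eb, &disk_key, key_offset, sizeof(disk_key));
 *
 * where 'key_offset' stands for whatever offset the caller computed
 */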
3464 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3465 unsigned long start,
3472 char *dst = (char *)dstv;
3473 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3474 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3476 WARN_ON(start > eb->len);
3477 WARN_ON(start + len > eb->start + eb->len);
3479 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3482 page = extent_buffer_page(eb, i);
3484 cur = min(len, (PAGE_CACHE_SIZE - offset));
3485 kaddr = kmap_atomic(page, KM_USER1);
3486 memcpy(dst, kaddr + offset, cur);
3487 kunmap_atomic(kaddr, KM_USER1);
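/*
 * map a small, page-contiguous chunk of the extent buffer so the caller
 * can touch it directly: a request that would span two pages cannot be
 * mapped here, otherwise the containing page is kmapped and pointers to
 * the mapping and to the requested bytes within it are handed back
 */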
3496 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3497 unsigned long min_len, char **token, char **map,
3498 unsigned long *map_start,
3499 unsigned long *map_len, int km)
3501 size_t offset = start & (PAGE_CACHE_SIZE - 1);
3504 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3505 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3506 unsigned long end_i = (start_offset + start + min_len - 1) >> PAGE_CACHE_SHIFT;
3513 offset = start_offset;
3517 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3520 if (start + min_len > eb->len) {
3521 printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
3522 "wanted %lu %lu\n", (unsigned long long)eb->start,
3523 eb->len, start, min_len);
3528 p = extent_buffer_page(eb, i);
3529 kaddr = kmap_atomic(p, km);
3531 *map = kaddr + offset;
3532 *map_len = PAGE_CACHE_SIZE - offset;
3536 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3537 unsigned long min_len,
3538 char **token, char **map,
3539 unsigned long *map_start,
3540 unsigned long *map_len, int km)
3544 if (eb->map_token) {
3545 unmap_extent_buffer(eb, eb->map_token, km);
3546 eb->map_token = NULL;
3549 err = map_private_extent_buffer(eb, start, min_len, token, map,
3550 map_start, map_len, km);
3552 eb->map_token = *token;
3554 eb->map_start = *map_start;
3555 eb->map_len = *map_len;
3560 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
3562 kunmap_atomic(token, km);
3565 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3566 unsigned long start,
3573 char *ptr = (char *)ptrv;
3574 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3575 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3578 WARN_ON(start > eb->len);
3579 WARN_ON(start + len > eb->start + eb->len);
3581 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3584 page = extent_buffer_page(eb, i);
3586 cur = min(len, (PAGE_CACHE_SIZE - offset));
3588 kaddr = kmap_atomic(page, KM_USER0);
3589 ret = memcmp(ptr, kaddr + offset, cur);
3590 kunmap_atomic(kaddr, KM_USER0);
3602 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3603 unsigned long start, unsigned long len)
3609 char *src = (char *)srcv;
3610 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3611 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3613 WARN_ON(start > eb->len);
3614 WARN_ON(start + len > eb->start + eb->len);
3616 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3619 page = extent_buffer_page(eb, i);
3620 WARN_ON(!PageUptodate(page));
3622 cur = min(len, PAGE_CACHE_SIZE - offset);
3623 kaddr = kmap_atomic(page, KM_USER1);
3624 memcpy(kaddr + offset, src, cur);
3625 kunmap_atomic(kaddr, KM_USER1);
3634 void memset_extent_buffer(struct extent_buffer *eb, char c,
3635 unsigned long start, unsigned long len)
3641 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3642 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3644 WARN_ON(start > eb->len);
3645 WARN_ON(start + len > eb->start + eb->len);
3647 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3650 page = extent_buffer_page(eb, i);
3651 WARN_ON(!PageUptodate(page));
3653 cur = min(len, PAGE_CACHE_SIZE - offset);
3654 kaddr = kmap_atomic(page, KM_USER0);
3655 memset(kaddr + offset, c, cur);
3656 kunmap_atomic(kaddr, KM_USER0);
3664 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3665 unsigned long dst_offset, unsigned long src_offset,
3668 u64 dst_len = dst->len;
3673 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3674 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3676 WARN_ON(src->len != dst_len);
3678 offset = (start_offset + dst_offset) &
3679 ((unsigned long)PAGE_CACHE_SIZE - 1);
3682 page = extent_buffer_page(dst, i);
3683 WARN_ON(!PageUptodate(page));
3685 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3687 kaddr = kmap_atomic(page, KM_USER0);
3688 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3689 kunmap_atomic(kaddr, KM_USER0);
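/*
 * helper for memmove_extent_buffer(): copy 'len' bytes between two pages.
 * Within a single page plain memmove() is safe; when the bytes live on
 * two different pages they are copied backwards, one byte at a time, so
 * overlapping source and destination ranges still end up correct
 */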
3698 static void move_pages(struct page *dst_page, struct page *src_page,
3699 unsigned long dst_off, unsigned long src_off,
3702 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3703 if (dst_page == src_page) {
3704 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3706 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3707 char *p = dst_kaddr + dst_off + len;
3708 char *s = src_kaddr + src_off + len;
3713 kunmap_atomic(src_kaddr, KM_USER1);
3715 kunmap_atomic(dst_kaddr, KM_USER0);
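/*
 * true when [src, src + len) and [dst, dst + len) overlap, i.e. when the
 * distance between the two offsets is smaller than len.  For example
 * src = 0, dst = 3, len = 8 gives a distance of 3 < 8, so they overlap
 */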
3718 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
3720 unsigned long distance = (src > dst) ? src - dst : dst - src;
3721 return distance < len;
3724 static void copy_pages(struct page *dst_page, struct page *src_page,
3725 unsigned long dst_off, unsigned long src_off,
3728 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3731 if (dst_page != src_page) {
3732 src_kaddr = kmap_atomic(src_page, KM_USER1);
3734 src_kaddr = dst_kaddr;
3735 BUG_ON(areas_overlap(src_off, dst_off, len));
3738 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3739 kunmap_atomic(dst_kaddr, KM_USER0);
3740 if (dst_page != src_page)
3741 kunmap_atomic(src_kaddr, KM_USER1);
3744 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3745 unsigned long src_offset, unsigned long len)
3748 size_t dst_off_in_page;
3749 size_t src_off_in_page;
3750 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3751 unsigned long dst_i;
3752 unsigned long src_i;
3754 if (src_offset + len > dst->len) {
3755 printk(KERN_ERR "btrfs memcpy bogus src_offset %lu move "
3756 "len %lu dst len %lu\n", src_offset, len, dst->len);
3759 if (dst_offset + len > dst->len) {
3760 printk(KERN_ERR "btrfs memcpy bogus dst_offset %lu move "
3761 "len %lu dst len %lu\n", dst_offset, len, dst->len);
3766 dst_off_in_page = (start_offset + dst_offset) &
3767 ((unsigned long)PAGE_CACHE_SIZE - 1);
3768 src_off_in_page = (start_offset + src_offset) &
3769 ((unsigned long)PAGE_CACHE_SIZE - 1);
3771 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3772 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3774 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - src_off_in_page));
3776 cur = min_t(unsigned long, cur,
3777 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3779 copy_pages(extent_buffer_page(dst, dst_i),
3780 extent_buffer_page(dst, src_i),
3781 dst_off_in_page, src_off_in_page, cur);
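/*
 * memmove() within a single extent buffer.  Non-overlapping moves are
 * handed to memcpy_extent_buffer(); overlapping ones are done from the
 * end of the range backwards with move_pages() so the source bytes are
 * never clobbered before they are copied
 */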
3789 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3790 unsigned long src_offset, unsigned long len)
3793 size_t dst_off_in_page;
3794 size_t src_off_in_page;
3795 unsigned long dst_end = dst_offset + len - 1;
3796 unsigned long src_end = src_offset + len - 1;
3797 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3798 unsigned long dst_i;
3799 unsigned long src_i;
3801 if (src_offset + len > dst->len) {
3802 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
3803 "len %lu dst len %lu\n", src_offset, len, dst->len);
3806 if (dst_offset + len > dst->len) {
3807 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
3808 "len %lu dst len %lu\n", dst_offset, len, dst->len);
3811 if (!areas_overlap(src_offset, dst_offset, len)) {
3812 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3816 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3817 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3819 dst_off_in_page = (start_offset + dst_end) &
3820 ((unsigned long)PAGE_CACHE_SIZE - 1);
3821 src_off_in_page = (start_offset + src_end) &
3822 ((unsigned long)PAGE_CACHE_SIZE - 1);
3824 cur = min_t(unsigned long, len, src_off_in_page + 1);
3825 cur = min(cur, dst_off_in_page + 1);
3826 move_pages(extent_buffer_page(dst, dst_i),
3827 extent_buffer_page(dst, src_i),
3828 dst_off_in_page - cur + 1,
3829 src_off_in_page - cur + 1, cur);
3837 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
3839 struct extent_buffer *eb =
3840 container_of(head, struct extent_buffer, rcu_head);
3842 btrfs_release_extent_buffer(eb);
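/*
 * releasepage hook for extent buffer pages: if the buffer that starts at
 * this page is clean and nobody but the tree holds a reference, drop it
 * from the buffer radix tree and free it via RCU; otherwise tell the VM
 * the page cannot be released yet
 */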
3845 int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
3847 u64 start = page_offset(page);
3848 struct extent_buffer *eb;
3851 spin_lock(&tree->buffer_lock);
3852 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3854 spin_unlock(&tree->buffer_lock);
3858 if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3864 * set @eb->refs to 0 if it is already 1, and then release the @eb.
3867 if (atomic_cmpxchg(&eb->refs, 1, 0) != 1) {
3872 radix_tree_delete(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3874 spin_unlock(&tree->buffer_lock);
3876 /* at this point we can safely release the extent buffer */
3877 if (atomic_read(&eb->refs) == 0)
3878 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);