* Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
* Written by Alex Tomas <alex@clusterfs.com>
* Architecture independence:
* Copyright (c) 2005, Bull S.A.
* Written by Pierre Peiffer <pierre.peiffer@bull.net>
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
* Extents support for EXT4
* TODO:
* - ext4*_error() should be used in some situations
* - analyze all BUG()/BUG_ON(), use -EIO where appropriate
* - smart tree reduction
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include <trace/events/ext4.h>
* used by extent splitting.
#define EXT4_EXT_MAY_ZEROOUT 0x1 /* safe to zeroout if split fails due to ENOSPC */
#define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */
#define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */
#define EXT4_EXT_DATA_VALID1 0x8 /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */
static __le32 ext4_extent_block_csum(struct inode *inode,
struct ext4_extent_header *eh)
struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
EXT4_EXTENT_TAIL_OFFSET(eh));
return cpu_to_le32(csum);
static int ext4_extent_block_csum_verify(struct inode *inode,
struct ext4_extent_header *eh)
struct ext4_extent_tail *et;
if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
et = find_ext4_extent_tail(eh);
if (et->et_checksum != ext4_extent_block_csum(inode, eh))
static void ext4_extent_block_csum_set(struct inode *inode,
struct ext4_extent_header *eh)
struct ext4_extent_tail *et;
if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
et = find_ext4_extent_tail(eh);
et->et_checksum = ext4_extent_block_csum(inode, eh);
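/*
 * Layout note (added for clarity, hedged): the helpers above assume that
 * every extent tree block reserves room for a struct ext4_extent_tail
 * after the last of its eh_max entry slots, and that
 * EXT4_EXTENT_TAIL_OFFSET(eh) yields the byte offset of that tail. The
 * crc32c therefore covers the header plus all entry slots, is seeded
 * with the per-inode checksum seed, and is stored in et_checksum.
 */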
static int ext4_split_extent(handle_t *handle,
struct ext4_ext_path *path,
struct ext4_map_blocks *map,
static int ext4_split_extent_at(handle_t *handle,
struct ext4_ext_path *path,
static int ext4_find_delayed_extent(struct inode *inode,
struct ext4_ext_cache *newex);
static int ext4_ext_truncate_extend_restart(handle_t *handle,
if (!ext4_handle_valid(handle))
if (handle->h_buffer_credits > needed)
err = ext4_journal_extend(handle, needed);
err = ext4_truncate_restart_trans(handle, inode, needed);
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path)
/* path points to block */
return ext4_journal_get_write_access(handle, path->p_bh);
/* path points to leaf/index in inode body */
/* we use in-core data, no need to protect them */
#define ext4_ext_dirty(handle, inode, path) \
__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
static int __ext4_ext_dirty(const char *where, unsigned int line,
handle_t *handle, struct inode *inode,
struct ext4_ext_path *path)
ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
/* path points to block */
err = __ext4_handle_dirty_metadata(where, line, handle,
/* path points to leaf/index in inode body */
err = ext4_mark_inode_dirty(handle, inode);
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
struct ext4_ext_path *path,
int depth = path->p_depth;
struct ext4_extent *ex;
* Try to predict block placement assuming that we are
* filling in a file which will eventually be
* non-sparse --- i.e., in the case of libbfd writing
* an ELF object sections out-of-order but in a way
* that eventually results in a contiguous object or
* executable file, or some database extending a table
* space file. However, this is actually somewhat
* non-ideal if we are writing a sparse file such as
* qemu or KVM writing a raw image file that is going
* to stay fairly sparse, since it will end up
* fragmenting the file system's free space. Maybe we
* should have some heuristics or some way to allow
* userspace to pass a hint to the file system,
* especially if the latter case turns out to be
* common.
ex = path[depth].p_ext;
ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);
if (block > ext_block)
return ext_pblk + (block - ext_block);
return ext_pblk - (ext_block - block);
/* it looks like the index is empty;
* try to find the starting block from the index itself */
if (path[depth].p_bh)
return path[depth].p_bh->b_blocknr;
/* OK. use inode's group */
return ext4_inode_to_goal_block(inode);
* Allocation for a metadata block
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path,
struct ext4_extent *ex, int *err, unsigned int flags)
ext4_fsblk_t goal, newblock;
goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
static inline int ext4_ext_space_block(struct inode *inode, int check)
size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
if (!check && size > 6)
static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
if (!check && size > 5)
static inline int ext4_ext_space_root(struct inode *inode, int check)
size = sizeof(EXT4_I(inode)->i_data);
size -= sizeof(struct ext4_extent_header);
size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
if (!check && size > 3)
static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
size = sizeof(EXT4_I(inode)->i_data);
size -= sizeof(struct ext4_extent_header);
size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
if (!check && size > 4)
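/*
 * Worked example (illustration only, assuming the common 4 KiB block and
 * the 12-byte on-disk header/entry sizes): a full leaf block holds
 * (4096 - 12) / 12 = 340 extents, an index block likewise 340 indexes,
 * and the 60-byte i_data root holds (60 - 12) / 12 = 4 extents or 4
 * indexes. AGGRESSIVE_TEST shrinks these limits so that deep trees are
 * easy to create during testing.
 */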
* Calculate the number of metadata blocks needed
* to allocate @blocks
* Worst case is one block per extent
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
struct ext4_inode_info *ei = EXT4_I(inode);
idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
/ sizeof(struct ext4_extent_idx));
* If the new delayed allocation block is contiguous with the
* previous da block, it can share index blocks with the
* previous block, so we only need to allocate a new index
* block every idxs leaf blocks. At idxs**2 blocks, we need
* an additional index block, and at idxs**3 blocks, yet
* another index block.
if (ei->i_da_metadata_calc_len &&
ei->i_da_metadata_calc_last_lblock+1 == lblock) {
if ((ei->i_da_metadata_calc_len % idxs) == 0)
if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
ei->i_da_metadata_calc_len = 0;
ei->i_da_metadata_calc_len++;
ei->i_da_metadata_calc_last_lblock++;
* In the worst case we need a new set of index blocks at
* every level of the inode's extent tree.
ei->i_da_metadata_calc_len = 1;
ei->i_da_metadata_calc_last_lblock = lblock;
return ext_depth(inode) + 1;
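/*
 * Worked example (illustrative): with 4 KiB blocks, idxs = 340. While a
 * delalloc write grows a file contiguously, most calls charge 0 extra
 * metadata blocks; every 340th leaf block costs one new index block,
 * every 340^2-th a second-level index, and so on. A non-contiguous
 * lblock resets the running state and charges the worst case,
 * ext_depth(inode) + 1, i.e. one new block per tree level plus a leaf.
 */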
ext4_ext_max_entries(struct inode *inode, int depth)
if (depth == ext_depth(inode)) {
max = ext4_ext_space_root(inode, 1);
max = ext4_ext_space_root_idx(inode, 1);
max = ext4_ext_space_block(inode, 1);
max = ext4_ext_space_block_idx(inode, 1);
static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
ext4_fsblk_t block = ext4_ext_pblock(ext);
int len = ext4_ext_get_actual_len(ext);
return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
static int ext4_valid_extent_idx(struct inode *inode,
struct ext4_extent_idx *ext_idx)
ext4_fsblk_t block = ext4_idx_pblock(ext_idx);
return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
static int ext4_valid_extent_entries(struct inode *inode,
struct ext4_extent_header *eh,
unsigned short entries;
if (eh->eh_entries == 0)
entries = le16_to_cpu(eh->eh_entries);
struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
if (!ext4_valid_extent(inode, ext))
struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
if (!ext4_valid_extent_idx(inode, ext_idx))
static int __ext4_ext_check(const char *function, unsigned int line,
struct inode *inode, struct ext4_extent_header *eh,
const char *error_msg;
if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
error_msg = "invalid magic";
if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
error_msg = "unexpected eh_depth";
if (unlikely(eh->eh_max == 0)) {
error_msg = "invalid eh_max";
max = ext4_ext_max_entries(inode, depth);
if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
error_msg = "too large eh_max";
if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
error_msg = "invalid eh_entries";
if (!ext4_valid_extent_entries(inode, eh, depth)) {
error_msg = "invalid extent entries";
/* Verify checksum on non-root extent tree nodes */
if (ext_depth(inode) != depth &&
!ext4_extent_block_csum_verify(inode, eh)) {
error_msg = "extent tree corrupted";
ext4_error_inode(inode, function, line, 0,
"bad header/extent: %s - magic %x, "
"entries %u, max %u(%u), depth %u(%u)",
error_msg, le16_to_cpu(eh->eh_magic),
le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
max, le16_to_cpu(eh->eh_depth), depth);
#define ext4_ext_check(inode, eh, depth) \
__ext4_ext_check(__func__, __LINE__, inode, eh, depth)
int ext4_ext_check_inode(struct inode *inode)
return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
static int __ext4_ext_check_block(const char *function, unsigned int line,
struct ext4_extent_header *eh,
struct buffer_head *bh)
if (buffer_verified(bh))
ret = ext4_ext_check(inode, eh, depth);
set_buffer_verified(bh);
#define ext4_ext_check_block(inode, eh, depth, bh) \
__ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh)
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
int k, l = path->p_depth;
for (k = 0; k <= l; k++, path++) {
ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block),
ext4_idx_pblock(path->p_idx));
} else if (path->p_ext) {
ext_debug(" %d:[%d]%d:%llu ",
le32_to_cpu(path->p_ext->ee_block),
ext4_ext_is_uninitialized(path->p_ext),
ext4_ext_get_actual_len(path->p_ext),
ext4_ext_pblock(path->p_ext));
static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
int depth = ext_depth(inode);
struct ext4_extent_header *eh;
struct ext4_extent *ex;
eh = path[depth].p_hdr;
ex = EXT_FIRST_EXTENT(eh);
ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);
for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
ext4_ext_is_uninitialized(ex),
ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
ext4_fsblk_t newblock, int level)
int depth = ext_depth(inode);
struct ext4_extent *ex;
if (depth != level) {
struct ext4_extent_idx *idx;
idx = path[level].p_idx;
while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
ext_debug("%d: move %d:%llu in new index %llu\n", level,
le32_to_cpu(idx->ei_block),
ext4_idx_pblock(idx),
ex = path[depth].p_ext;
while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
le32_to_cpu(ex->ee_block),
ext4_ext_is_uninitialized(ex),
ext4_ext_get_actual_len(ex),
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
void ext4_ext_drop_refs(struct ext4_ext_path *path)
int depth = path->p_depth;
for (i = 0; i <= depth; i++, path++)
* ext4_ext_binsearch_idx:
* binary search for the closest index of the given block
* the header must be checked before calling this
ext4_ext_binsearch_idx(struct inode *inode,
struct ext4_ext_path *path, ext4_lblk_t block)
struct ext4_extent_header *eh = path->p_hdr;
struct ext4_extent_idx *r, *l, *m;
ext_debug("binsearch for %u(idx): ", block);
l = EXT_FIRST_INDEX(eh) + 1;
r = EXT_LAST_INDEX(eh);
if (block < le32_to_cpu(m->ei_block))
ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
m, le32_to_cpu(m->ei_block),
r, le32_to_cpu(r->ei_block));
ext_debug(" -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
ext4_idx_pblock(path->p_idx));
#ifdef CHECK_BINSEARCH
struct ext4_extent_idx *chix, *ix;
chix = ix = EXT_FIRST_INDEX(eh);
for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
printk(KERN_DEBUG "k=%d, ix=0x%p, "
ix, EXT_FIRST_INDEX(eh));
printk(KERN_DEBUG "%u <= %u\n",
le32_to_cpu(ix->ei_block),
le32_to_cpu(ix[-1].ei_block));
BUG_ON(k && le32_to_cpu(ix->ei_block)
<= le32_to_cpu(ix[-1].ei_block));
if (block < le32_to_cpu(ix->ei_block))
BUG_ON(chix != path->p_idx);
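/*
 * Minimal user-space sketch (illustrative names) of the search invariant
 * used above: among entries sorted by start block, pick the last one
 * whose start is <= the target, so the chosen index/extent is the one
 * that could cover the target block.
 */
static int example_binsearch(const unsigned int *start, int nr,
			     unsigned int block)
{
	int l = 1, r = nr - 1;		/* entry 0 is the fallback answer */

	while (l <= r) {
		int m = l + (r - l) / 2;

		if (block < start[m])
			r = m - 1;
		else
			l = m + 1;
	}
	return l - 1;			/* last entry with start[] <= block */
}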
* ext4_ext_binsearch:
* binary search for closest extent of the given block
* the header must be checked before calling this
ext4_ext_binsearch(struct inode *inode,
struct ext4_ext_path *path, ext4_lblk_t block)
struct ext4_extent_header *eh = path->p_hdr;
struct ext4_extent *r, *l, *m;
if (eh->eh_entries == 0) {
* this leaf is empty:
* we get such a leaf in split/add case
ext_debug("binsearch for %u: ", block);
l = EXT_FIRST_EXTENT(eh) + 1;
r = EXT_LAST_EXTENT(eh);
if (block < le32_to_cpu(m->ee_block))
ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
m, le32_to_cpu(m->ee_block),
r, le32_to_cpu(r->ee_block));
ext_debug(" -> %d:%llu:[%d]%d ",
le32_to_cpu(path->p_ext->ee_block),
ext4_ext_pblock(path->p_ext),
ext4_ext_is_uninitialized(path->p_ext),
ext4_ext_get_actual_len(path->p_ext));
#ifdef CHECK_BINSEARCH
struct ext4_extent *chex, *ex;
chex = ex = EXT_FIRST_EXTENT(eh);
for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
BUG_ON(k && le32_to_cpu(ex->ee_block)
<= le32_to_cpu(ex[-1].ee_block));
if (block < le32_to_cpu(ex->ee_block))
BUG_ON(chex != path->p_ext);
int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
struct ext4_extent_header *eh;
eh = ext_inode_hdr(inode);
eh->eh_magic = EXT4_EXT_MAGIC;
eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
ext4_mark_inode_dirty(handle, inode);
ext4_ext_invalidate_cache(inode);
struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
struct ext4_ext_path *path)
struct ext4_extent_header *eh;
struct buffer_head *bh;
short int depth, i, ppos = 0, alloc = 0;
eh = ext_inode_hdr(inode);
depth = ext_depth(inode);
/* account possible depth increase */
path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
return ERR_PTR(-ENOMEM);
/* walk through the tree */
ext_debug("depth %d: num %d, max %d\n",
ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
ext4_ext_binsearch_idx(inode, path + ppos, block);
path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
path[ppos].p_depth = i;
path[ppos].p_ext = NULL;
bh = sb_getblk(inode->i_sb, path[ppos].p_block);
if (!bh_uptodate_or_lock(bh)) {
trace_ext4_ext_load_extent(inode, block,
ret = bh_submit_read(bh);
eh = ext_block_hdr(bh);
if (unlikely(ppos > depth)) {
EXT4_ERROR_INODE(inode,
"ppos %d > depth %d", ppos, depth);
path[ppos].p_bh = bh;
path[ppos].p_hdr = eh;
ret = ext4_ext_check_block(inode, eh, i, bh);
path[ppos].p_depth = i;
path[ppos].p_ext = NULL;
path[ppos].p_idx = NULL;
ext4_ext_binsearch(inode, path + ppos, block);
/* if not an empty leaf */
if (path[ppos].p_ext)
path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
ext4_ext_show_path(inode, path);
ext4_ext_drop_refs(path);
* ext4_ext_insert_index:
* insert new index [@logical;@ptr] into the block at @curp;
* check where to insert: before @curp or after @curp
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
struct ext4_ext_path *curp,
int logical, ext4_fsblk_t ptr)
struct ext4_extent_idx *ix;
err = ext4_ext_get_access(handle, inode, curp);
if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
EXT4_ERROR_INODE(inode,
"logical %d == ei_block %d!",
logical, le32_to_cpu(curp->p_idx->ei_block));
if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
>= le16_to_cpu(curp->p_hdr->eh_max))) {
EXT4_ERROR_INODE(inode,
"eh_entries %d >= eh_max %d!",
le16_to_cpu(curp->p_hdr->eh_entries),
le16_to_cpu(curp->p_hdr->eh_max));
if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
ext_debug("insert new index %d after: %llu\n", logical, ptr);
ix = curp->p_idx + 1;
ext_debug("insert new index %d before: %llu\n", logical, ptr);
len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
ext_debug("insert new index %d: "
"move %d indices from 0x%p to 0x%p\n",
logical, len, ix, ix + 1);
memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
ix->ei_block = cpu_to_le32(logical);
ext4_idx_store_pblock(ix, ptr);
le16_add_cpu(&curp->p_hdr->eh_entries, 1);
if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
err = ext4_ext_dirty(handle, inode, curp);
ext4_std_error(inode->i_sb, err);
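/*
 * Sketch of the insertion step above (illustrative, user-space style):
 * make room with one memmove of the tail, then write the new entry.
 */
static void example_insert_at(unsigned int *arr, int *nr, int pos,
			      unsigned int val)
{
	memmove(&arr[pos + 1], &arr[pos],
		(*nr - pos) * sizeof(arr[0]));	/* shift the tail right */
	arr[pos] = val;
	(*nr)++;
}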
* ext4_ext_split:
* inserts a new subtree into the path, using the free index entry
* - allocates all needed blocks (new leaf and all intermediate index blocks)
* - makes decision where to split
* - moves remaining extents and index entries (right to the split point)
* into the newly allocated blocks
* - initializes subtree
static int ext4_ext_split(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path,
struct ext4_extent *newext, int at)
struct buffer_head *bh = NULL;
int depth = ext_depth(inode);
struct ext4_extent_header *neh;
struct ext4_extent_idx *fidx;
ext4_fsblk_t newblock, oldblock;
ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
/* make decision: where to split? */
/* FIXME: now decision is simplest: at current extent */
/* if the current leaf will be split, then we should use
* the border from the split point */
if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
border = path[depth].p_ext[1].ee_block;
ext_debug("leaf will be split."
" next leaf starts at %d\n",
le32_to_cpu(border));
border = newext->ee_block;
ext_debug("leaf will be added."
" next leaf starts at %d\n",
le32_to_cpu(border));
* If an error occurs, then we break processing
* and mark the filesystem read-only. The index won't
* be inserted and the tree will be in a consistent
* state. The next mount will repair buffers too.
* Get array to track all allocated blocks.
* We need this to handle errors and free blocks
ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
/* allocate all needed blocks */
ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
for (a = 0; a < depth - at; a++) {
newblock = ext4_ext_new_meta_block(handle, inode, path,
newext, &err, flags);
ablocks[a] = newblock;
/* initialize new leaf */
newblock = ablocks[--a];
if (unlikely(newblock == 0)) {
EXT4_ERROR_INODE(inode, "newblock == 0!");
bh = sb_getblk(inode->i_sb, newblock);
err = ext4_journal_get_create_access(handle, bh);
neh = ext_block_hdr(bh);
neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
neh->eh_magic = EXT4_EXT_MAGIC;
/* move remainder of path[depth] to the new leaf */
if (unlikely(path[depth].p_hdr->eh_entries !=
path[depth].p_hdr->eh_max)) {
EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
path[depth].p_hdr->eh_entries,
path[depth].p_hdr->eh_max);
/* start copy from next extent */
m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
ext4_ext_show_move(inode, path, newblock, depth);
struct ext4_extent *ex;
ex = EXT_FIRST_EXTENT(neh);
memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
le16_add_cpu(&neh->eh_entries, m);
ext4_extent_block_csum_set(inode, neh);
set_buffer_uptodate(bh);
err = ext4_handle_dirty_metadata(handle, inode, bh);
/* correct old leaf */
err = ext4_ext_get_access(handle, inode, path + depth);
le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
err = ext4_ext_dirty(handle, inode, path + depth);
/* create intermediate indexes */
if (unlikely(k < 0)) {
EXT4_ERROR_INODE(inode, "k %d < 0!", k);
ext_debug("create %d intermediate indices\n", k);
/* insert new index into current index block */
/* current depth stored in i var */
oldblock = newblock;
newblock = ablocks[--a];
bh = sb_getblk(inode->i_sb, newblock);
if (unlikely(!bh)) {
err = ext4_journal_get_create_access(handle, bh);
neh = ext_block_hdr(bh);
neh->eh_entries = cpu_to_le16(1);
neh->eh_magic = EXT4_EXT_MAGIC;
neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
neh->eh_depth = cpu_to_le16(depth - i);
fidx = EXT_FIRST_INDEX(neh);
fidx->ei_block = border;
ext4_idx_store_pblock(fidx, oldblock);
ext_debug("int.index at %d (block %llu): %u -> %llu\n",
i, newblock, le32_to_cpu(border), oldblock);
/* move remainder of path[i] to the new index block */
if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
EXT_LAST_INDEX(path[i].p_hdr))) {
EXT4_ERROR_INODE(inode,
"EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
le32_to_cpu(path[i].p_ext->ee_block));
/* start copy indexes */
m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
EXT_MAX_INDEX(path[i].p_hdr));
ext4_ext_show_move(inode, path, newblock, i);
memmove(++fidx, path[i].p_idx,
sizeof(struct ext4_extent_idx) * m);
le16_add_cpu(&neh->eh_entries, m);
ext4_extent_block_csum_set(inode, neh);
set_buffer_uptodate(bh);
err = ext4_handle_dirty_metadata(handle, inode, bh);
/* correct old index */
err = ext4_ext_get_access(handle, inode, path + i);
le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
err = ext4_ext_dirty(handle, inode, path + i);
/* insert new index */
err = ext4_ext_insert_index(handle, inode, path + at,
le32_to_cpu(border), newblock);
if (buffer_locked(bh))
/* free all allocated blocks in error case */
for (i = 0; i < depth; i++) {
ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
EXT4_FREE_BLOCKS_METADATA);
* ext4_ext_grow_indepth:
* implements tree growing procedure:
* - allocates new block
* - moves top-level data (index block or leaf) into the new block
* - initializes new top-level, creating index that points to the
* just created block
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
struct ext4_extent *newext)
struct ext4_extent_header *neh;
struct buffer_head *bh;
ext4_fsblk_t newblock;
newblock = ext4_ext_new_meta_block(handle, inode, NULL,
newext, &err, flags);
bh = sb_getblk(inode->i_sb, newblock);
err = ext4_journal_get_create_access(handle, bh);
/* move top-level index/leaf into new block */
memmove(bh->b_data, EXT4_I(inode)->i_data,
sizeof(EXT4_I(inode)->i_data));
/* set size of new block */
neh = ext_block_hdr(bh);
/* old root could have indexes or leaves
* so calculate eh_max the right way */
if (ext_depth(inode))
neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
neh->eh_magic = EXT4_EXT_MAGIC;
ext4_extent_block_csum_set(inode, neh);
set_buffer_uptodate(bh);
err = ext4_handle_dirty_metadata(handle, inode, bh);
/* Update top-level index: num,max,pointer */
neh = ext_inode_hdr(inode);
neh->eh_entries = cpu_to_le16(1);
ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
if (neh->eh_depth == 0) {
/* Root extent block becomes index block */
neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
EXT_FIRST_INDEX(neh)->ei_block =
EXT_FIRST_EXTENT(neh)->ee_block;
ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
le16_add_cpu(&neh->eh_depth, 1);
ext4_mark_inode_dirty(handle, inode);
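/*
 * Shape of the tree before/after ext4_ext_grow_indepth() (illustration):
 *
 *   before:  root (in i_data) -> [entries held directly in the inode]
 *   after:   root (in i_data, depth + 1) -> new block holding the old
 *            root's entries; the root keeps a single index pointing at
 *            that block, so a later split has a free slot to use.
 */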
* ext4_ext_create_new_leaf:
* finds empty index and adds new leaf.
* if no free index is found, then it requests growing the tree in depth.
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path,
struct ext4_extent *newext)
struct ext4_ext_path *curp;
int depth, i, err = 0;
i = depth = ext_depth(inode);
/* walk up the tree looking for a free index entry */
curp = path + depth;
while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
/* we use the already allocated block for the index block,
* so subsequent data blocks should be contiguous */
if (EXT_HAS_FREE_INDEX(curp)) {
/* if we found an index with a free entry, then use that
* entry: create all needed subtree and add new leaf */
err = ext4_ext_split(handle, inode, flags, path, newext, i);
ext4_ext_drop_refs(path);
path = ext4_ext_find_extent(inode,
(ext4_lblk_t)le32_to_cpu(newext->ee_block),
err = PTR_ERR(path);
/* tree is full, time to grow in depth */
err = ext4_ext_grow_indepth(handle, inode, flags, newext);
ext4_ext_drop_refs(path);
path = ext4_ext_find_extent(inode,
(ext4_lblk_t)le32_to_cpu(newext->ee_block),
err = PTR_ERR(path);
* only the first grow (depth 0 -> 1) produces free space;
* in all other cases we have to split the grown tree
depth = ext_depth(inode);
if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
/* now we need to split */
* Search for the closest allocated block to the left of *logical
* and return it at @logical, with its physical address at @phys.
* If *logical is the smallest allocated block, the function
* returns 0 at @phys.
* The return value contains 0 (success) or an error code.
static int ext4_ext_search_left(struct inode *inode,
struct ext4_ext_path *path,
ext4_lblk_t *logical, ext4_fsblk_t *phys)
struct ext4_extent_idx *ix;
struct ext4_extent *ex;
if (unlikely(path == NULL)) {
EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
depth = path->p_depth;
if (depth == 0 && path->p_ext == NULL)
/* usually the extent in the path covers blocks smaller
* than *logical, but it can be that the extent is the
* first one in the file */
ex = path[depth].p_ext;
ee_len = ext4_ext_get_actual_len(ex);
if (*logical < le32_to_cpu(ex->ee_block)) {
if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
EXT4_ERROR_INODE(inode,
"EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
*logical, le32_to_cpu(ex->ee_block));
while (--depth >= 0) {
ix = path[depth].p_idx;
if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
EXT4_ERROR_INODE(inode,
"ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
EXT4_ERROR_INODE(inode,
"logical %d < ee_block %d + ee_len %d!",
*logical, le32_to_cpu(ex->ee_block), ee_len);
*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
*phys = ext4_ext_pblock(ex) + ee_len - 1;
* Search for the closest allocated block to the right of *logical
* and return it at @logical, with its physical address at @phys.
* If *logical is the largest allocated block, the function
* returns 0 at @phys.
* The return value contains 0 (success) or an error code.
static int ext4_ext_search_right(struct inode *inode,
struct ext4_ext_path *path,
ext4_lblk_t *logical, ext4_fsblk_t *phys,
struct ext4_extent **ret_ex)
struct buffer_head *bh = NULL;
struct ext4_extent_header *eh;
struct ext4_extent_idx *ix;
struct ext4_extent *ex;
int depth; /* Note, NOT eh_depth; depth from top of tree */
if (unlikely(path == NULL)) {
EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
depth = path->p_depth;
if (depth == 0 && path->p_ext == NULL)
/* usually the extent in the path covers blocks smaller
* than *logical, but it can be that the extent is the
* first one in the file */
ex = path[depth].p_ext;
ee_len = ext4_ext_get_actual_len(ex);
if (*logical < le32_to_cpu(ex->ee_block)) {
if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
EXT4_ERROR_INODE(inode,
"first_extent(path[%d].p_hdr) != ex",
while (--depth >= 0) {
ix = path[depth].p_idx;
if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
EXT4_ERROR_INODE(inode,
"ix != EXT_FIRST_INDEX *logical %d!",
if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
EXT4_ERROR_INODE(inode,
"logical %d < ee_block %d + ee_len %d!",
*logical, le32_to_cpu(ex->ee_block), ee_len);
if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
/* next allocated block in this leaf */
/* go up and search for index to the right */
while (--depth >= 0) {
ix = path[depth].p_idx;
if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
/* we've gone up to the root and found no index to the right */
/* we've found an index to the right, let's
* follow it and find the closest allocated
* block to the right */
block = ext4_idx_pblock(ix);
while (++depth < path->p_depth) {
bh = sb_bread(inode->i_sb, block);
eh = ext_block_hdr(bh);
/* subtract from p_depth to get proper eh_depth */
if (ext4_ext_check_block(inode, eh,
path->p_depth - depth, bh)) {
ix = EXT_FIRST_INDEX(eh);
block = ext4_idx_pblock(ix);
bh = sb_bread(inode->i_sb, block);
eh = ext_block_hdr(bh);
if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) {
ex = EXT_FIRST_EXTENT(eh);
*logical = le32_to_cpu(ex->ee_block);
*phys = ext4_ext_pblock(ex);
* ext4_ext_next_allocated_block:
* returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
* NOTE: it considers the block number from an index entry as an
* allocated block. Thus, index entries have to be consistent
* with leaves.
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
BUG_ON(path == NULL);
depth = path->p_depth;
if (depth == 0 && path->p_ext == NULL)
return EXT_MAX_BLOCKS;
while (depth >= 0) {
if (depth == path->p_depth) {
if (path[depth].p_ext &&
path[depth].p_ext !=
EXT_LAST_EXTENT(path[depth].p_hdr))
return le32_to_cpu(path[depth].p_ext[1].ee_block);
if (path[depth].p_idx !=
EXT_LAST_INDEX(path[depth].p_hdr))
return le32_to_cpu(path[depth].p_idx[1].ei_block);
return EXT_MAX_BLOCKS;
* ext4_ext_next_leaf_block:
* returns first allocated block from next leaf or EXT_MAX_BLOCKS
static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
BUG_ON(path == NULL);
depth = path->p_depth;
/* zero-depth tree has no leaf blocks at all */
return EXT_MAX_BLOCKS;
/* go to index block */
while (depth >= 0) {
if (path[depth].p_idx !=
EXT_LAST_INDEX(path[depth].p_hdr))
return (ext4_lblk_t)
le32_to_cpu(path[depth].p_idx[1].ei_block);
return EXT_MAX_BLOCKS;
* ext4_ext_correct_indexes:
* if the leaf gets modified and the modified extent is first in the leaf,
* then we have to correct all indexes above.
* TODO: do we need to correct tree in all cases?
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path)
struct ext4_extent_header *eh;
int depth = ext_depth(inode);
struct ext4_extent *ex;
eh = path[depth].p_hdr;
ex = path[depth].p_ext;
if (unlikely(ex == NULL || eh == NULL)) {
EXT4_ERROR_INODE(inode,
"ex %p == NULL or eh %p == NULL", ex, eh);
/* there is no tree at all */
if (ex != EXT_FIRST_EXTENT(eh)) {
/* we correct the tree only if the first leaf got modified */
* TODO: we need correction if border is smaller than current one
border = path[depth].p_ext->ee_block;
err = ext4_ext_get_access(handle, inode, path + k);
path[k].p_idx->ei_block = border;
err = ext4_ext_dirty(handle, inode, path + k);
/* change all left-side indexes */
if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
err = ext4_ext_get_access(handle, inode, path + k);
path[k].p_idx->ei_block = border;
err = ext4_ext_dirty(handle, inode, path + k);
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
struct ext4_extent *ex2)
unsigned short ext1_ee_len, ext2_ee_len, max_len;
* Make sure that either both extents are uninitialized, or
* both are not.
if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
if (ext4_ext_is_uninitialized(ex1))
max_len = EXT_UNINIT_MAX_LEN;
max_len = EXT_INIT_MAX_LEN;
ext1_ee_len = ext4_ext_get_actual_len(ex1);
ext2_ee_len = ext4_ext_get_actual_len(ex2);
if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
le32_to_cpu(ex2->ee_block))
* To allow future support for preallocated extents to be added
* as an RO_COMPAT feature, refuse to merge two extents if
* this can result in the top bit of ee_len being set.
if (ext1_ee_len + ext2_ee_len > max_len)
#ifdef AGGRESSIVE_TEST
if (ext1_ee_len >= 4)
if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
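/*
 * The merge test above boils down to four conditions; a hedged,
 * stand-alone restatement (names and parameters illustrative only):
 */
static int example_mergeable(unsigned int lblk1, unsigned long long pblk1,
			     unsigned short len1, int uninit1,
			     unsigned int lblk2, unsigned long long pblk2,
			     unsigned short len2, int uninit2,
			     unsigned int max_len)
{
	return uninit1 == uninit2 &&	/* same initialization state */
	       lblk1 + len1 == lblk2 &&	/* logically contiguous */
	       pblk1 + len1 == pblk2 &&	/* physically contiguous */
	       len1 + len2 <= max_len;	/* merged length must still fit */
}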
* This function tries to merge the "ex" extent to the next extent in the tree.
* It always tries to merge towards right. If you want to merge towards
* left, pass "ex - 1" as argument instead of "ex".
* Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
* 1 if they got merged.
static int ext4_ext_try_to_merge_right(struct inode *inode,
struct ext4_ext_path *path,
struct ext4_extent *ex)
struct ext4_extent_header *eh;
unsigned int depth, len;
int uninitialized = 0;
depth = ext_depth(inode);
BUG_ON(path[depth].p_hdr == NULL);
eh = path[depth].p_hdr;
while (ex < EXT_LAST_EXTENT(eh)) {
if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
/* merge with next extent! */
if (ext4_ext_is_uninitialized(ex))
ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
+ ext4_ext_get_actual_len(ex + 1));
ext4_ext_mark_uninitialized(ex);
if (ex + 1 < EXT_LAST_EXTENT(eh)) {
len = (EXT_LAST_EXTENT(eh) - ex - 1)
* sizeof(struct ext4_extent);
memmove(ex + 1, ex + 2, len);
le16_add_cpu(&eh->eh_entries, -1);
WARN_ON(eh->eh_entries == 0);
if (!eh->eh_entries)
EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
* This function does a very simple check to see if we can collapse
* an extent tree with a single extent tree leaf block into the inode.
static void ext4_ext_try_to_merge_up(handle_t *handle,
struct inode *inode,
struct ext4_ext_path *path)
unsigned max_root = ext4_ext_space_root(inode, 0);
if ((path[0].p_depth != 1) ||
(le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
(le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
* We need to modify the block allocation bitmap and the block
* group descriptor to release the extent tree block. If we
* can't get the journal credits, give up.
if (ext4_journal_extend(handle, 2))
* Copy the extent data up to the inode
blk = ext4_idx_pblock(path[0].p_idx);
s = le16_to_cpu(path[1].p_hdr->eh_entries) *
sizeof(struct ext4_extent_idx);
s += sizeof(struct ext4_extent_header);
memcpy(path[0].p_hdr, path[1].p_hdr, s);
path[0].p_depth = 0;
path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
(path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
path[0].p_hdr->eh_max = cpu_to_le16(max_root);
brelse(path[1].p_bh);
ext4_free_blocks(handle, inode, NULL, blk, 1,
EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
* This function tries to merge the @ex extent to its neighbours in the
* tree; afterwards it tries to collapse a single-leaf tree into the inode.
static void ext4_ext_try_to_merge(handle_t *handle,
struct inode *inode,
struct ext4_ext_path *path,
struct ext4_extent *ex) {
struct ext4_extent_header *eh;
depth = ext_depth(inode);
BUG_ON(path[depth].p_hdr == NULL);
eh = path[depth].p_hdr;
if (ex > EXT_FIRST_EXTENT(eh))
merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
(void) ext4_ext_try_to_merge_right(inode, path, ex);
ext4_ext_try_to_merge_up(handle, inode, path);
* check if a portion of the "newext" extent overlaps with an
* existing extent.
* If there is an overlap discovered, it updates the length of the newext
* such that there will be no overlap, and then returns 1.
* If there is no overlap found, it returns 0.
static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
struct inode *inode,
struct ext4_extent *newext,
struct ext4_ext_path *path)
unsigned int depth, len1;
unsigned int ret = 0;
b1 = le32_to_cpu(newext->ee_block);
len1 = ext4_ext_get_actual_len(newext);
depth = ext_depth(inode);
if (!path[depth].p_ext)
b2 = le32_to_cpu(path[depth].p_ext->ee_block);
b2 &= ~(sbi->s_cluster_ratio - 1);
* get the next allocated block if the extent in the path
* is before the requested block(s)
b2 = ext4_ext_next_allocated_block(path);
if (b2 == EXT_MAX_BLOCKS)
b2 &= ~(sbi->s_cluster_ratio - 1);
/* check for wrap through zero on extent logical start block */
if (b1 + len1 < b1) {
len1 = EXT_MAX_BLOCKS - b1;
newext->ee_len = cpu_to_le16(len1);
/* check for overlap */
if (b1 + len1 > b2) {
newext->ee_len = cpu_to_le16(b2 - b1);
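/*
 * Worked example (illustrative): newext covers logical blocks 100..119
 * (b1 = 100, len1 = 20) and the next allocated block is b2 = 110. The
 * code above clamps newext to ee_len = b2 - b1 = 10 blocks, so the
 * caller allocates only 100..109 and never overlaps existing extents;
 * with bigalloc, b2 is first rounded down to its cluster boundary.
 */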
* ext4_ext_insert_extent:
* tries to merge the requested extent into an existing extent or
* inserts the requested extent as a new one into the tree,
* creating a new leaf in the no-space case.
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path,
struct ext4_extent *newext, int flag)
struct ext4_extent_header *eh;
struct ext4_extent *ex, *fex;
struct ext4_extent *nearex; /* nearest extent */
struct ext4_ext_path *npath = NULL;
int depth, len, err;
unsigned uninitialized = 0;
if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
depth = ext_depth(inode);
ex = path[depth].p_ext;
if (unlikely(path[depth].p_hdr == NULL)) {
EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
/* try to insert block into found extent and return */
if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
&& ext4_can_extents_be_merged(inode, ex, newext)) {
ext_debug("append [%d]%d block to %u:[%d]%d (from %llu)\n",
ext4_ext_is_uninitialized(newext),
ext4_ext_get_actual_len(newext),
le32_to_cpu(ex->ee_block),
ext4_ext_is_uninitialized(ex),
ext4_ext_get_actual_len(ex),
ext4_ext_pblock(ex));
err = ext4_ext_get_access(handle, inode, path + depth);
* ext4_can_extents_be_merged should have checked that either
* both extents are uninitialized, or both aren't. Thus we
* need to check only one of them here.
if (ext4_ext_is_uninitialized(ex))
ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
+ ext4_ext_get_actual_len(newext));
ext4_ext_mark_uninitialized(ex);
eh = path[depth].p_hdr;
depth = ext_depth(inode);
eh = path[depth].p_hdr;
if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
/* probably the next leaf has space for us? */
fex = EXT_LAST_EXTENT(eh);
next = EXT_MAX_BLOCKS;
if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
next = ext4_ext_next_leaf_block(path);
if (next != EXT_MAX_BLOCKS) {
ext_debug("next leaf block - %u\n", next);
BUG_ON(npath != NULL);
npath = ext4_ext_find_extent(inode, next, NULL);
return PTR_ERR(npath);
BUG_ON(npath->p_depth != path->p_depth);
eh = npath[depth].p_hdr;
if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
ext_debug("next leaf isn't full(%d)\n",
le16_to_cpu(eh->eh_entries));
ext_debug("next leaf has no free space(%d,%d)\n",
le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
* There is no free space in the found leaf.
* We are going to add a new leaf to the tree.
if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT)
flags = EXT4_MB_USE_ROOT_BLOCKS;
err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
depth = ext_depth(inode);
eh = path[depth].p_hdr;
nearex = path[depth].p_ext;
err = ext4_ext_get_access(handle, inode, path + depth);
/* there is no extent in this leaf, create the first one */
ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
le32_to_cpu(newext->ee_block),
ext4_ext_pblock(newext),
ext4_ext_is_uninitialized(newext),
ext4_ext_get_actual_len(newext));
nearex = EXT_FIRST_EXTENT(eh);
if (le32_to_cpu(newext->ee_block)
> le32_to_cpu(nearex->ee_block)) {
ext_debug("insert %u:%llu:[%d]%d before: "
le32_to_cpu(newext->ee_block),
ext4_ext_pblock(newext),
ext4_ext_is_uninitialized(newext),
ext4_ext_get_actual_len(newext),
BUG_ON(newext->ee_block == nearex->ee_block);
ext_debug("insert %u:%llu:[%d]%d after: "
le32_to_cpu(newext->ee_block),
ext4_ext_pblock(newext),
ext4_ext_is_uninitialized(newext),
ext4_ext_get_actual_len(newext),
len = EXT_LAST_EXTENT(eh) - nearex + 1;
ext_debug("insert %u:%llu:[%d]%d: "
"move %d extents from 0x%p to 0x%p\n",
le32_to_cpu(newext->ee_block),
ext4_ext_pblock(newext),
ext4_ext_is_uninitialized(newext),
ext4_ext_get_actual_len(newext),
len, nearex, nearex + 1);
memmove(nearex + 1, nearex,
len * sizeof(struct ext4_extent));
le16_add_cpu(&eh->eh_entries, 1);
path[depth].p_ext = nearex;
nearex->ee_block = newext->ee_block;
ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
nearex->ee_len = newext->ee_len;
/* try to merge extents */
if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
ext4_ext_try_to_merge(handle, inode, path, nearex);
/* time to correct all indexes above */
err = ext4_ext_correct_indexes(handle, inode, path);
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
ext4_ext_drop_refs(npath);
ext4_ext_invalidate_cache(inode);
static int ext4_fill_fiemap_extents(struct inode *inode,
ext4_lblk_t block, ext4_lblk_t num,
struct fiemap_extent_info *fieinfo)
struct ext4_ext_path *path = NULL;
struct ext4_ext_cache newex;
struct ext4_extent *ex;
ext4_lblk_t next, next_del, start = 0, end = 0;
ext4_lblk_t last = block + num;
int exists, depth = 0, err = 0;
unsigned int flags = 0;
unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
while (block < last && block != EXT_MAX_BLOCKS) {
/* find extent for this block */
down_read(&EXT4_I(inode)->i_data_sem);
if (path && ext_depth(inode) != depth) {
/* depth was changed, we have to realloc the path */
path = ext4_ext_find_extent(inode, block, path);
up_read(&EXT4_I(inode)->i_data_sem);
err = PTR_ERR(path);
depth = ext_depth(inode);
if (unlikely(path[depth].p_hdr == NULL)) {
up_read(&EXT4_I(inode)->i_data_sem);
EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
ex = path[depth].p_ext;
next = ext4_ext_next_allocated_block(path);
ext4_ext_drop_refs(path);
/* there is no extent yet, so try to allocate
* all requested space */
} else if (le32_to_cpu(ex->ee_block) > block) {
/* need to allocate space before found extent */
end = le32_to_cpu(ex->ee_block);
if (block + num < end)
} else if (block >= le32_to_cpu(ex->ee_block)
+ ext4_ext_get_actual_len(ex)) {
/* need to allocate space after found extent */
} else if (block >= le32_to_cpu(ex->ee_block)) {
* some part of the requested space is covered by the found extent
end = le32_to_cpu(ex->ee_block)
+ ext4_ext_get_actual_len(ex);
if (block + num < end)
BUG_ON(end <= start);
newex.ec_block = start;
newex.ec_len = end - start;
newex.ec_block = le32_to_cpu(ex->ee_block);
newex.ec_len = ext4_ext_get_actual_len(ex);
newex.ec_start = ext4_ext_pblock(ex);
if (ext4_ext_is_uninitialized(ex))
flags |= FIEMAP_EXTENT_UNWRITTEN;
* Find delayed extent and update newex accordingly. We call
* it even in the !exists case to find out whether newex is the
* last existing extent or not.
next_del = ext4_find_delayed_extent(inode, &newex);
if (!exists && next_del) {
flags |= FIEMAP_EXTENT_DELALLOC;
up_read(&EXT4_I(inode)->i_data_sem);
if (unlikely(newex.ec_len == 0)) {
EXT4_ERROR_INODE(inode, "newex.ec_len == 0");
/* This is possible iff next == next_del == EXT_MAX_BLOCKS */
if (next == next_del) {
flags |= FIEMAP_EXTENT_LAST;
if (unlikely(next_del != EXT_MAX_BLOCKS ||
next != EXT_MAX_BLOCKS)) {
EXT4_ERROR_INODE(inode,
"next extent == %u, next "
"delalloc extent = %u",
err = fiemap_fill_next_extent(fieinfo,
(__u64)newex.ec_block << blksize_bits,
(__u64)newex.ec_start << blksize_bits,
(__u64)newex.ec_len << blksize_bits,
block = newex.ec_block + newex.ec_len;
ext4_ext_drop_refs(path);
ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
__u32 len, ext4_fsblk_t start)
struct ext4_ext_cache *cex;
spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
trace_ext4_ext_put_in_cache(inode, block, len, start);
cex = &EXT4_I(inode)->i_cached_extent;
cex->ec_block = block;
cex->ec_start = start;
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
* ext4_ext_put_gap_in_cache:
* calculate boundaries of the gap that the requested block fits into
* and cache this gap
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
int depth = ext_depth(inode);
struct ext4_extent *ex;
ex = path[depth].p_ext;
/* there is no extent yet, so gap is [0;-] */
len = EXT_MAX_BLOCKS;
ext_debug("cache gap(whole file):");
} else if (block < le32_to_cpu(ex->ee_block)) {
len = le32_to_cpu(ex->ee_block) - block;
ext_debug("cache gap(before): %u [%u:%u]",
le32_to_cpu(ex->ee_block),
ext4_ext_get_actual_len(ex));
} else if (block >= le32_to_cpu(ex->ee_block)
+ ext4_ext_get_actual_len(ex)) {
lblock = le32_to_cpu(ex->ee_block)
+ ext4_ext_get_actual_len(ex);
next = ext4_ext_next_allocated_block(path);
ext_debug("cache gap(after): [%u:%u] %u",
le32_to_cpu(ex->ee_block),
ext4_ext_get_actual_len(ex),
BUG_ON(next == lblock);
len = next - lblock;
ext_debug(" -> %u:%lu\n", lblock, len);
ext4_ext_put_in_cache(inode, lblock, len, 0);
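/*
 * Worked example (illustrative): the leaf holds an extent starting at
 * logical block 200 and the lookup was for block 50. The cached gap is
 * then [50, 200), i.e. lblock = 50, len = 150, with ec_start == 0
 * marking it as a hole; later lookups in that range can be answered
 * from the cache without walking the tree again.
 */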
* ext4_ext_in_cache()
* Checks to see if the given block is in the cache.
* If it is, the cached extent is stored in the given
* cache extent pointer.
* @inode: The file's inode
* @block: The block to look for in the cache
* @ex: Pointer where the cached extent will be stored
* if it contains block
* Return 0 if cache is invalid; 1 if the cache is valid
ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
struct ext4_extent *ex)
struct ext4_ext_cache *cex;
* We borrow i_block_reservation_lock to protect i_cached_extent
spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
cex = &EXT4_I(inode)->i_cached_extent;
/* does the cache hold valid data? */
if (cex->ec_len == 0)
if (in_range(block, cex->ec_block, cex->ec_len)) {
ex->ee_block = cpu_to_le32(cex->ec_block);
ext4_ext_store_pblock(ex, cex->ec_start);
ex->ee_len = cpu_to_le16(cex->ec_len);
ext_debug("%u cached by %u:%u:%llu\n",
cex->ec_block, cex->ec_len, cex->ec_start);
trace_ext4_ext_in_cache(inode, block, ret);
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
* ext4_ext_rm_idx:
* removes the index from the index block.
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path, int depth)
/* free index block */
path = path + depth;
leaf = ext4_idx_pblock(path->p_idx);
if (unlikely(path->p_hdr->eh_entries == 0)) {
EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
err = ext4_ext_get_access(handle, inode, path);
if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
len *= sizeof(struct ext4_extent_idx);
memmove(path->p_idx, path->p_idx + 1, len);
le16_add_cpu(&path->p_hdr->eh_entries, -1);
err = ext4_ext_dirty(handle, inode, path);
ext_debug("index is empty, remove it, free block %llu\n", leaf);
trace_ext4_ext_rm_idx(inode, leaf);
ext4_free_blocks(handle, inode, NULL, leaf, 1,
EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
while (--depth >= 0) {
if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
err = ext4_ext_get_access(handle, inode, path);
path->p_idx->ei_block = (path+1)->p_idx->ei_block;
err = ext4_ext_dirty(handle, inode, path);
* ext4_ext_calc_credits_for_single_extent:
* This routine returns the maximum number of credits needed to insert
* an extent into the extent tree.
* When passing the actual path, the caller should calculate credits
* under i_data_sem.
int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
struct ext4_ext_path *path)
int depth = ext_depth(inode);
/* probably there is space in the leaf? */
if (le16_to_cpu(path[depth].p_hdr->eh_entries)
< le16_to_cpu(path[depth].p_hdr->eh_max)) {
* There is some space in the leaf; no
* need to account for the leaf block credit.
* Bitmaps and block group descriptor blocks
* and other metadata blocks still need to be
* accounted for.
/* 1 bitmap, 1 block group descriptor */
ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
return ext4_chunk_trans_blocks(inode, nrblocks);
* How many index/leaf blocks need to be changed/allocated to modify nrblocks?
* If nrblocks fit in a single extent (chunk flag is 1), then in the worst
* case each tree level index/leaf needs to be changed if the tree splits
* due to inserting a new extent; the old tree index/leaf blocks need to be
* updated too.
* If the nrblocks are discontiguous, they could cause
* the whole tree to split more than once, but this is really rare.
int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
/* If we are converting the inline data, only one is needed here. */
if (ext4_has_inline_data(inode))
depth = ext_depth(inode);
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
struct ext4_extent *ex,
ext4_fsblk_t *partial_cluster,
ext4_lblk_t from, ext4_lblk_t to)
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
unsigned short ee_len = ext4_ext_get_actual_len(ex);
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
flags |= EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
else if (ext4_should_journal_data(inode))
flags |= EXT4_FREE_BLOCKS_FORGET;
* For bigalloc file systems, we never free a partial cluster
* at the beginning of the extent. Instead, we make a note
* that we tried freeing the cluster, and check to see if we
* need to free it on a subsequent call to ext4_remove_blocks,
* or at the end of the ext4_truncate() operation.
flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
* If we have a partial cluster, and it's different from the
* cluster of the last block, we need to explicitly free the
* partial cluster here.
pblk = ext4_ext_pblock(ex) + ee_len - 1;
if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
ext4_free_blocks(handle, inode, NULL,
EXT4_C2B(sbi, *partial_cluster),
sbi->s_cluster_ratio, flags);
*partial_cluster = 0;
#ifdef EXTENTS_STATS
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
spin_lock(&sbi->s_ext_stats_lock);
sbi->s_ext_blocks += ee_len;
sbi->s_ext_extents++;
if (ee_len < sbi->s_ext_min)
sbi->s_ext_min = ee_len;
if (ee_len > sbi->s_ext_max)
sbi->s_ext_max = ee_len;
if (ext_depth(inode) > sbi->s_depth_max)
sbi->s_depth_max = ext_depth(inode);
spin_unlock(&sbi->s_ext_stats_lock);
if (from >= le32_to_cpu(ex->ee_block)
&& to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
num = le32_to_cpu(ex->ee_block) + ee_len - from;
pblk = ext4_ext_pblock(ex) + ee_len - num;
ext_debug("free last %u blocks starting %llu\n", num, pblk);
ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
* If the block range to be freed didn't start at the
* beginning of a cluster, and we removed the entire
* extent, save the partial cluster here, since we
* might need to delete it if we determine that the
* truncate operation has removed all of the blocks in
* the cluster.
if (pblk & (sbi->s_cluster_ratio - 1) &&
*partial_cluster = EXT4_B2C(sbi, pblk);
*partial_cluster = 0;
} else if (from == le32_to_cpu(ex->ee_block)
&& to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
start = ext4_ext_pblock(ex);
ext_debug("free first %u blocks starting %llu\n", num, start);
ext4_free_blocks(handle, inode, NULL, start, num, flags);
printk(KERN_INFO "strange request: removal(2) "
"%u-%u from %u:%u\n",
from, to, le32_to_cpu(ex->ee_block), ee_len);
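/*
 * Bigalloc worked example (illustrative): with a cluster ratio of 16,
 * freeing the tail of an extent that ends in the middle of cluster C
 * must not free C itself yet; *partial_cluster remembers C, and the
 * cluster is only released once a later call (or the end of the
 * truncate) proves that no other blocks of C remain in use.
 */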
2442 * ext4_ext_rm_leaf() Removes the extents associated with the
2443 * blocks appearing between "start" and "end", and splits the extents
2444 * if "start" and "end" appear in the same extent
2446 * @handle: The journal handle
2447 * @inode: The files inode
2448 * @path: The path to the leaf
2449 * @start: The first block to remove
2450 * @end: The last block to remove
2453 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2454 struct ext4_ext_path *path, ext4_fsblk_t *partial_cluster,
2455 ext4_lblk_t start, ext4_lblk_t end)
2457 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2458 int err = 0, correct_index = 0;
2459 int depth = ext_depth(inode), credits;
2460 	struct ext4_extent_header *eh;
2461 	ext4_lblk_t a, b;
2462 	unsigned num;
2463 ext4_lblk_t ex_ee_block;
2464 unsigned short ex_ee_len;
2465 unsigned uninitialized = 0;
2466 struct ext4_extent *ex;
2468 /* the header must be checked already in ext4_ext_remove_space() */
2469 ext_debug("truncate since %u in leaf to %u\n", start, end);
2470 if (!path[depth].p_hdr)
2471 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2472 eh = path[depth].p_hdr;
2473 if (unlikely(path[depth].p_hdr == NULL)) {
2474 		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2475 		return -EIO;
2476 	}
2477 /* find where to start removing */
2478 ex = EXT_LAST_EXTENT(eh);
2480 ex_ee_block = le32_to_cpu(ex->ee_block);
2481 ex_ee_len = ext4_ext_get_actual_len(ex);
2483 trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
2485 while (ex >= EXT_FIRST_EXTENT(eh) &&
2486 ex_ee_block + ex_ee_len > start) {
2488 		if (ext4_ext_is_uninitialized(ex))
2489 			uninitialized = 1;
2490 		else
2491 			uninitialized = 0;
2493 ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
2494 uninitialized, ex_ee_len);
2495 path[depth].p_ext = ex;
2497 a = ex_ee_block > start ? ex_ee_block : start;
2498 b = ex_ee_block+ex_ee_len - 1 < end ?
2499 ex_ee_block+ex_ee_len - 1 : end;
2501 ext_debug(" border %u:%u\n", a, b);
2503 /* If this extent is beyond the end of the hole, skip it */
2504 if (end < ex_ee_block) {
2506 ex_ee_block = le32_to_cpu(ex->ee_block);
2507 ex_ee_len = ext4_ext_get_actual_len(ex);
2509 } else if (b != ex_ee_block + ex_ee_len - 1) {
2510 			EXT4_ERROR_INODE(inode,
2511 					 "can not handle truncate %u:%u "
2512 					 "on extent %u:%u",
2513 					 start, end, ex_ee_block,
2514 					 ex_ee_block + ex_ee_len - 1);
2515 			err = -EIO;
2516 			goto out;
2517 } else if (a != ex_ee_block) {
2518 /* remove tail of the extent */
2519 num = a - ex_ee_block;
2520 		} else {
2521 			/* remove whole extent: excellent! */
2522 			num = 0;
2523 		}
2525 * 3 for leaf, sb, and inode plus 2 (bmap and group
2526 * descriptor) for each block group; assume two block
2527 		 * groups plus ex_ee_len/blocks_per_block_group for
2528 		 * the worst case
2529 		 */
2530 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2531 		if (ex == EXT_FIRST_EXTENT(eh)) {
2532 			correct_index = 1;
2533 			credits += (ext_depth(inode)) + 1;
2534 		}
2535 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
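		/*
		 * Worked example for the estimate above (illustrative
		 * numbers only): with 32768 blocks per group and
		 * ex_ee_len == 100000, credits = 7 + 2*(100000/32768)
		 * = 7 + 2*3 = 13, before the extra depth and quota
		 * credits are added for the first extent.
		 */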
2537 		err = ext4_ext_truncate_extend_restart(handle, inode, credits);
2538 		if (err)
2539 			goto out;
2541 		err = ext4_ext_get_access(handle, inode, path + depth);
2542 		if (err)
2543 			goto out;
2545 		err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
2546 					 a, b);
2547 		if (err)
2548 			goto out;
2550 		if (num == 0)
2551 			/* this extent is removed; mark slot entirely unused */
2552 			ext4_ext_store_pblock(ex, 0);
2554 ex->ee_len = cpu_to_le16(num);
2556 * Do not mark uninitialized if all the blocks in the
2557 * extent have been removed.
2559 if (uninitialized && num)
2560 ext4_ext_mark_uninitialized(ex);
2562 * If the extent was completely released,
2563 * we need to remove it from the leaf
2565 		if (num == 0) {
2566 			if (end != EXT_MAX_BLOCKS - 1) {
2568 * For hole punching, we need to scoot all the
2569 * extents up when an extent is removed so that
2570 				 * we don't have blank extents in the middle
2571 				 */
2572 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2573 sizeof(struct ext4_extent));
2575 /* Now get rid of the one at the end */
2576 memset(EXT_LAST_EXTENT(eh), 0,
2577 sizeof(struct ext4_extent));
2578 			}
2579 			le16_add_cpu(&eh->eh_entries, -1);
2580 		} else
2581 			*partial_cluster = 0;
2583 		err = ext4_ext_dirty(handle, inode, path + depth);
2584 		if (err)
2585 			goto out;
2587 ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
2588 ext4_ext_pblock(ex));
2589 		ex--;
2590 		ex_ee_block = le32_to_cpu(ex->ee_block);
2591 		ex_ee_len = ext4_ext_get_actual_len(ex);
2592 	}
2594 if (correct_index && eh->eh_entries)
2595 err = ext4_ext_correct_indexes(handle, inode, path);
2598 	 * If there is still an entry in the leaf node, check to see if
2599 * it references the partial cluster. This is the only place
2600 * where it could; if it doesn't, we can free the cluster.
2602 if (*partial_cluster && ex >= EXT_FIRST_EXTENT(eh) &&
2603 (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
2604 *partial_cluster)) {
2605 int flags = EXT4_FREE_BLOCKS_FORGET;
2607 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2608 flags |= EXT4_FREE_BLOCKS_METADATA;
2610 ext4_free_blocks(handle, inode, NULL,
2611 EXT4_C2B(sbi, *partial_cluster),
2612 sbi->s_cluster_ratio, flags);
2613 *partial_cluster = 0;
2616 /* if this leaf is free, then we should
2617 * remove it from index block above */
2618 if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2619 		err = ext4_ext_rm_idx(handle, inode, path, depth);
2621 out:
2622 	return err;
2623 }
2626 * ext4_ext_more_to_rm:
2627 * returns 1 if current index has to be freed (even partial)
2630 ext4_ext_more_to_rm(struct ext4_ext_path *path)
2632 BUG_ON(path->p_idx == NULL);
2634 	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2635 		return 0;
2638 * if truncate on deeper level happened, it wasn't partial,
2639 * so we have to consider current index for truncation
2641 	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2642 		return 0;
2643 	return 1;
2644 }
2646 static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
2649 struct super_block *sb = inode->i_sb;
2650 int depth = ext_depth(inode);
2651 struct ext4_ext_path *path = NULL;
2652 	ext4_fsblk_t partial_cluster = 0;
2653 	handle_t *handle;
2654 	int i = 0, err = 0;
2656 ext_debug("truncate since %u to %u\n", start, end);
2658 /* probably first extent we're gonna free will be last in block */
2659 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1);
2660 	if (IS_ERR(handle))
2661 		return PTR_ERR(handle);
2663 again:
2664 	ext4_ext_invalidate_cache(inode);
2666 trace_ext4_ext_remove_space(inode, start, depth);
2669 * Check if we are removing extents inside the extent tree. If that
2670 * is the case, we are going to punch a hole inside the extent tree
2671 * so we have to check whether we need to split the extent covering
2672 * the last block to remove so we can easily remove the part of it
2673 * in ext4_ext_rm_leaf().
2675 if (end < EXT_MAX_BLOCKS - 1) {
2676 struct ext4_extent *ex;
2677 ext4_lblk_t ee_block;
2679 /* find extent for this block */
2680 path = ext4_ext_find_extent(inode, end, NULL);
2681 		if (IS_ERR(path)) {
2682 			ext4_journal_stop(handle);
2683 			return PTR_ERR(path);
2684 		}
2685 depth = ext_depth(inode);
2686 		/* The leaf may not exist only if the inode has no blocks at all */
2687 ex = path[depth].p_ext;
2688 		if (!ex) {
2689 			if (depth) {
2690 				EXT4_ERROR_INODE(inode,
2691 						 "path[%d].p_hdr == NULL",
2692 						 depth);
2693 				err = -EIO;
2694 			}
2695 			goto out;
2696 		}
2698 ee_block = le32_to_cpu(ex->ee_block);
2701 * See if the last block is inside the extent, if so split
2702 * the extent at 'end' block so we can easily remove the
2703 * tail of the first part of the split extent in
2704 * ext4_ext_rm_leaf().
2706 if (end >= ee_block &&
2707 		    end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
2708 			int split_flag = 0;
2710 if (ext4_ext_is_uninitialized(ex))
2711 split_flag = EXT4_EXT_MARK_UNINIT1 |
2712 EXT4_EXT_MARK_UNINIT2;
2715 * Split the extent in two so that 'end' is the last
2716 * block in the first new extent
2718 err = ext4_split_extent_at(handle, inode, path,
2719 end + 1, split_flag,
2720 EXT4_GET_BLOCKS_PRE_IO |
2721 EXT4_GET_BLOCKS_PUNCH_OUT_EXT);
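			/*
			 * Worked example (hypothetical numbers): punching a
			 * hole that ends at block 150 inside an extent
			 * covering blocks 100..199 splits at end + 1 == 151,
			 * leaving [100..150], whose tail is then trimmed by
			 * ext4_ext_rm_leaf(), and an untouched [151..199].
			 */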
2728 * We start scanning from right side, freeing all the blocks
2729 * after i_size and walking into the tree depth-wise.
2731 depth = ext_depth(inode);
2732 	if (path) {
2733 		int k = i = depth;
2734 		while (--k > 0)
2735 			path[k].p_block =
2736 				le16_to_cpu(path[k].p_hdr->eh_entries)+1;
2737 	} else {
2738 		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
2739 			       GFP_NOFS);
2740 		if (path == NULL) {
2741 			ext4_journal_stop(handle);
2742 			return -ENOMEM;
2743 		}
2744 path[0].p_depth = depth;
2745 path[0].p_hdr = ext_inode_hdr(inode);
2746 		i = 0;
2748 		if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
2749 			err = -EIO;
2750 			goto out;
2751 		}
2752 	}
2753 	err = 0;
2755 while (i >= 0 && err == 0) {
2756 		if (i == depth) {
2757 			/* this is leaf block */
2758 			err = ext4_ext_rm_leaf(handle, inode, path,
2759 					       &partial_cluster, start,
2760 					       end);
2761 			/* root level has p_bh == NULL, brelse() eats this */
2762 			brelse(path[i].p_bh);
2763 			path[i].p_bh = NULL;
2764 			i--;
2765 			continue;
2766 		}
2768 /* this is index block */
2769 if (!path[i].p_hdr) {
2770 ext_debug("initialize header\n");
2771 path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2774 if (!path[i].p_idx) {
2775 /* this level hasn't been touched yet */
2776 path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2777 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2778 			ext_debug("init index ptr: hdr 0x%p, num %d\n",
2779 				  path[i].p_hdr,
2780 				  le16_to_cpu(path[i].p_hdr->eh_entries));
2781 		} else {
2782 			/* we were already here, see at next index */
2783 			path[i].p_idx--;
2784 		}
2786 ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
2787 			  i, EXT_FIRST_INDEX(path[i].p_hdr),
2788 			  path[i].p_idx);
2789 if (ext4_ext_more_to_rm(path + i)) {
2790 struct buffer_head *bh;
2791 /* go to the next level */
2792 ext_debug("move to level %d (block %llu)\n",
2793 i + 1, ext4_idx_pblock(path[i].p_idx));
2794 memset(path + i + 1, 0, sizeof(*path));
2795 bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
2796 			if (!bh) {
2797 				/* should we reset i_size? */
2798 				err = -EIO;
2799 				break;
2800 			}
2801 			if (WARN_ON(i + 1 > depth)) {
2802 				err = -EIO;
2803 				break;
2804 			}
2805 			if (ext4_ext_check_block(inode, ext_block_hdr(bh),
2806 						 depth - i - 1, bh)) {
2807 				err = -EIO;
2808 				break;
2809 			}
2810 path[i + 1].p_bh = bh;
2812 /* save actual number of indexes since this
2813 * number is changed at the next iteration */
2814 			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2815 			i++;
2816 		} else {
2817 /* we finished processing this index, go up */
2818 if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2819 /* index is empty, remove it;
2820 * handle must be already prepared by the
2821 			 * ext4_ext_rm_leaf() */
2822 				err = ext4_ext_rm_idx(handle, inode, path, i);
2823 			}
2824 			/* root level has p_bh == NULL, brelse() eats this */
2825 			brelse(path[i].p_bh);
2826 			path[i].p_bh = NULL;
2827 			i--;
2828 			ext_debug("return to level %d\n", i);
2829 		}
2830 	}
2832 trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster,
2833 path->p_hdr->eh_entries);
2835 /* If we still have something in the partial cluster and we have removed
2836 * even the first extent, then we should free the blocks in the partial
2837 * cluster as well. */
2838 if (partial_cluster && path->p_hdr->eh_entries == 0) {
2839 int flags = EXT4_FREE_BLOCKS_FORGET;
2841 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2842 flags |= EXT4_FREE_BLOCKS_METADATA;
2844 ext4_free_blocks(handle, inode, NULL,
2845 EXT4_C2B(EXT4_SB(sb), partial_cluster),
2846 EXT4_SB(sb)->s_cluster_ratio, flags);
2847 partial_cluster = 0;
2850 /* TODO: flexible tree reduction should be here */
2851 if (path->p_hdr->eh_entries == 0) {
2853 * truncate to zero freed all the tree,
2854 * so we need to correct eh_depth
2856 		err = ext4_ext_get_access(handle, inode, path);
2857 		if (err == 0) {
2858 			ext_inode_hdr(inode)->eh_depth = 0;
2859 ext_inode_hdr(inode)->eh_max =
2860 cpu_to_le16(ext4_ext_space_root(inode, 0));
2861 			err = ext4_ext_dirty(handle, inode, path);
2862 		}
2863 	}
2864 out:
2865 	ext4_ext_drop_refs(path);
2866 	kfree(path);
2867 	if (err == -EAGAIN) {
2868 		path = NULL;
2869 		goto again;
2870 	}
2871 ext4_journal_stop(handle);
2877 * called at mount time
2879 void ext4_ext_init(struct super_block *sb)
2882 * possible initialization would be here
2885 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
2886 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
2887 printk(KERN_INFO "EXT4-fs: file extents enabled"
2888 #ifdef AGGRESSIVE_TEST
2889 ", aggressive tests"
2891 #ifdef CHECK_BINSEARCH
2894 #ifdef EXTENTS_STATS
2899 #ifdef EXTENTS_STATS
2900 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
2901 EXT4_SB(sb)->s_ext_min = 1 << 30;
2902 EXT4_SB(sb)->s_ext_max = 0;
2908 * called at umount time
2910 void ext4_ext_release(struct super_block *sb)
2912 if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
2915 #ifdef EXTENTS_STATS
2916 if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
2917 struct ext4_sb_info *sbi = EXT4_SB(sb);
2918 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
2919 sbi->s_ext_blocks, sbi->s_ext_extents,
2920 sbi->s_ext_blocks / sbi->s_ext_extents);
2921 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
2922 sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
2927 /* FIXME!! we need to try to merge to left or right after zero-out */
2928 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2930 ext4_fsblk_t ee_pblock;
2931 unsigned int ee_len;
2934 ee_len = ext4_ext_get_actual_len(ex);
2935 ee_pblock = ext4_ext_pblock(ex);
2937 ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
2945  * ext4_split_extent_at() splits an extent at a given block.
2947 * @handle: the journal handle
2948 * @inode: the file inode
2949 * @path: the path to the extent
2950  * @split: the logical block where the extent is split.
2951  * @split_flags: indicates if the extent could be zeroed out if the split
2952  *		 fails, and the states (init or uninit) of the new extents.
2953  * @flags: flags used to insert the new extent into the extent tree.
2956  * Splits extent [a, b] into two extents [a, @split) and [@split, b], the
2957  * states of which are determined by split_flag.
2959  * There are two cases:
2960  *   a> the extent is split into two extents.
2961  *   b> no split is needed, and the extent is just marked.
2963 * return 0 on success.
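 *
 * Worked example (illustrative numbers only): for an extent with
 * ee_block == 100, ee_len == 50 and physical start 5000, a split at
 * @split == 120 yields [100..119] at block 5000 and [120..149] at
 * newblock == split - ee_block + 5000 == 5020.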
2965 static int ext4_split_extent_at(handle_t *handle,
2966 struct inode *inode,
2967 struct ext4_ext_path *path,
2972 ext4_fsblk_t newblock;
2973 ext4_lblk_t ee_block;
2974 struct ext4_extent *ex, newex, orig_ex;
2975 struct ext4_extent *ex2 = NULL;
2976 unsigned int ee_len, depth;
2979 BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
2980 (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
2982 	ext_debug("ext4_split_extent_at: inode %lu, logical "
2983 		  "block %llu\n", inode->i_ino, (unsigned long long)split);
2985 ext4_ext_show_leaf(inode, path);
2987 depth = ext_depth(inode);
2988 ex = path[depth].p_ext;
2989 ee_block = le32_to_cpu(ex->ee_block);
2990 ee_len = ext4_ext_get_actual_len(ex);
2991 newblock = split - ee_block + ext4_ext_pblock(ex);
2993 BUG_ON(split < ee_block || split >= (ee_block + ee_len));
2995 err = ext4_ext_get_access(handle, inode, path + depth);
2999 if (split == ee_block) {
3001 		 * case b: block @split is the block that the extent begins with,
3002 		 * then we just change the state of the extent, and splitting
3003 		 * is not needed.
3004 		 */
3005 		if (split_flag & EXT4_EXT_MARK_UNINIT2)
3006 			ext4_ext_mark_uninitialized(ex);
3007 		else
3008 			ext4_ext_mark_initialized(ex);
3010 		if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
3011 			ext4_ext_try_to_merge(handle, inode, path, ex);
3013 		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3014 		goto out;
3015 	}
3018 memcpy(&orig_ex, ex, sizeof(orig_ex));
3019 ex->ee_len = cpu_to_le16(split - ee_block);
3020 if (split_flag & EXT4_EXT_MARK_UNINIT1)
3021 ext4_ext_mark_uninitialized(ex);
3024 * path may lead to new leaf, not to original leaf any more
3025 * after ext4_ext_insert_extent() returns,
3027 err = ext4_ext_dirty(handle, inode, path + depth);
3028 	if (err)
3029 		goto fix_extent_len;
3031 	ex2 = &newex;
3032 ex2->ee_block = cpu_to_le32(split);
3033 ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block));
3034 ext4_ext_store_pblock(ex2, newblock);
3035 if (split_flag & EXT4_EXT_MARK_UNINIT2)
3036 ext4_ext_mark_uninitialized(ex2);
3038 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
3039 if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3040 		if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
3041 			if (split_flag & EXT4_EXT_DATA_VALID1)
3042 				err = ext4_ext_zeroout(inode, ex2);
3043 			else
3044 				err = ext4_ext_zeroout(inode, ex);
3045 		} else
3046 			err = ext4_ext_zeroout(inode, &orig_ex);
3048 		if (err)
3049 			goto fix_extent_len;
3050 		/* update the extent length and mark as initialized */
3051 		ex->ee_len = cpu_to_le16(ee_len);
3052 		ext4_ext_try_to_merge(handle, inode, path, ex);
3053 		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3054 		goto out;
3055 	} else if (err)
3056 		goto fix_extent_len;
3058 out:
3059 	ext4_ext_show_leaf(inode, path);
3060 	return err;
3062 fix_extent_len:
3063 	ex->ee_len = orig_ex.ee_len;
3064 	ext4_ext_dirty(handle, inode, path + depth);
3065 	return err;
3066 }
3069  * ext4_split_extent() splits an extent and marks the extent which is covered
3070  * by @map as split_flag indicates.
3072  * It may result in splitting the extent into multiple extents (up to three).
3073  * There are three possibilities:
3074  *   a> There is no split required
3075  *   b> Splits in two extents: Split is happening at either end of the extent
3076  *   c> Splits in three extents: Someone is splitting in the middle of the extent
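 *
 * Illustrative example (hypothetical numbers): for an extent covering
 * blocks [100..199] and @map spanning [140..159], case c applies; the
 * extent is first split at map->m_lblk + map->m_len == 160 and then at
 * map->m_lblk == 140, producing [100..139], [140..159] and [160..199].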
3079 static int ext4_split_extent(handle_t *handle,
3080 struct inode *inode,
3081 struct ext4_ext_path *path,
3082 struct ext4_map_blocks *map,
3086 ext4_lblk_t ee_block;
3087 struct ext4_extent *ex;
3088 unsigned int ee_len, depth;
3091 int split_flag1, flags1;
3093 depth = ext_depth(inode);
3094 ex = path[depth].p_ext;
3095 ee_block = le32_to_cpu(ex->ee_block);
3096 ee_len = ext4_ext_get_actual_len(ex);
3097 uninitialized = ext4_ext_is_uninitialized(ex);
3099 if (map->m_lblk + map->m_len < ee_block + ee_len) {
3100 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
3101 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
3102 		if (uninitialized)
3103 			split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
3104 EXT4_EXT_MARK_UNINIT2;
3105 if (split_flag & EXT4_EXT_DATA_VALID2)
3106 split_flag1 |= EXT4_EXT_DATA_VALID1;
3107 err = ext4_split_extent_at(handle, inode, path,
3108 				map->m_lblk + map->m_len, split_flag1, flags1);
3109 		if (err)
3110 			goto out;
3111 	}
3113 ext4_ext_drop_refs(path);
3114 path = ext4_ext_find_extent(inode, map->m_lblk, path);
3115 	if (IS_ERR(path))
3116 		return PTR_ERR(path);
3118 if (map->m_lblk >= ee_block) {
3119 split_flag1 = split_flag & (EXT4_EXT_MAY_ZEROOUT |
3120 EXT4_EXT_DATA_VALID2);
3121 		if (uninitialized)
3122 			split_flag1 |= EXT4_EXT_MARK_UNINIT1;
3123 if (split_flag & EXT4_EXT_MARK_UNINIT2)
3124 split_flag1 |= EXT4_EXT_MARK_UNINIT2;
3125 err = ext4_split_extent_at(handle, inode, path,
3126 map->m_lblk, split_flag1, flags);
3127 		if (err)
3128 			goto out;
3129 	}
3131 	ext4_ext_show_leaf(inode, path);
3132 out:
3133 return err ? err : map->m_len;
3137 * This function is called by ext4_ext_map_blocks() if someone tries to write
3138 * to an uninitialized extent. It may result in splitting the uninitialized
3139  * extent into multiple extents (up to three - one initialized and two
3140  * uninitialized).
3141  * There are three possibilities:
3142  *   a> There is no split required: Entire extent should be initialized
3143  *   b> Splits in two extents: Write is happening at either end of the extent
3144  *   c> Splits in three extents: Someone is writing in the middle of the extent
3147 * - The extent pointed to by 'path' is uninitialized.
3148 * - The extent pointed to by 'path' contains a superset
3149 * of the logical span [map->m_lblk, map->m_lblk + map->m_len).
3151 * Post-conditions on success:
3152  *  - the returned value is the number of blocks beyond map->m_lblk
3153 * that are allocated and initialized.
3154 * It is guaranteed to be >= map->m_len.
3156 static int ext4_ext_convert_to_initialized(handle_t *handle,
3157 struct inode *inode,
3158 struct ext4_map_blocks *map,
3159 struct ext4_ext_path *path)
3161 struct ext4_sb_info *sbi;
3162 struct ext4_extent_header *eh;
3163 struct ext4_map_blocks split_map;
3164 struct ext4_extent zero_ex;
3165 struct ext4_extent *ex;
3166 ext4_lblk_t ee_block, eof_block;
3167 unsigned int ee_len, depth;
3168 	int allocated, max_zeroout = 0;
3169 	int err = 0;
3170 	int split_flag = 0;
3172 	ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical "
3173 		  "block %llu, max_blocks %u\n", inode->i_ino,
3174 (unsigned long long)map->m_lblk, map->m_len);
3176 sbi = EXT4_SB(inode->i_sb);
3177 eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3178 inode->i_sb->s_blocksize_bits;
3179 if (eof_block < map->m_lblk + map->m_len)
3180 eof_block = map->m_lblk + map->m_len;
3182 depth = ext_depth(inode);
3183 eh = path[depth].p_hdr;
3184 ex = path[depth].p_ext;
3185 ee_block = le32_to_cpu(ex->ee_block);
3186 ee_len = ext4_ext_get_actual_len(ex);
3187 allocated = ee_len - (map->m_lblk - ee_block);
3189 trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
3191 /* Pre-conditions */
3192 BUG_ON(!ext4_ext_is_uninitialized(ex));
3193 BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
3196 * Attempt to transfer newly initialized blocks from the currently
3197 * uninitialized extent to its left neighbor. This is much cheaper
3198 * than an insertion followed by a merge as those involve costly
3199 * memmove() calls. This is the common case in steady state for
3200 * workloads doing fallocate(FALLOC_FL_KEEP_SIZE) followed by append
3203 * Limitations of the current logic:
3204 * - L1: we only deal with writes at the start of the extent.
3205 * The approach could be extended to writes at the end
3206 * of the extent but this scenario was deemed less common.
3207 * - L2: we do not deal with writes covering the whole extent.
3208 * This would require removing the extent if the transfer
3210 * - L3: we only attempt to merge with an extent stored in the
3211 * same extent tree node.
3213 if ((map->m_lblk == ee_block) && /*L1*/
3214 (map->m_len < ee_len) && /*L2*/
3215 (ex > EXT_FIRST_EXTENT(eh))) { /*L3*/
3216 struct ext4_extent *prev_ex;
3217 ext4_lblk_t prev_lblk;
3218 ext4_fsblk_t prev_pblk, ee_pblk;
3219 unsigned int prev_len, write_len;
3221 		prev_ex = ex - 1;
3222 		prev_lblk = le32_to_cpu(prev_ex->ee_block);
3223 prev_len = ext4_ext_get_actual_len(prev_ex);
3224 prev_pblk = ext4_ext_pblock(prev_ex);
3225 ee_pblk = ext4_ext_pblock(ex);
3226 write_len = map->m_len;
3229 * A transfer of blocks from 'ex' to 'prev_ex' is allowed
3230 * upon those conditions:
3231 * - C1: prev_ex is initialized,
3232 * - C2: prev_ex is logically abutting ex,
3233 * - C3: prev_ex is physically abutting ex,
3234 * - C4: prev_ex can receive the additional blocks without
3235 * overflowing the (initialized) length limit.
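		 *
		 * Worked example (illustrative numbers): prev_ex covers
		 * blocks [90..99] at pblk 490 (initialized) and ex covers
		 * [100..107] at pblk 500 (uninitialized). A write of
		 * write_len == 4 blocks at m_lblk == 100 satisfies C1-C4,
		 * so prev_ex grows to [90..103] and ex shrinks to
		 * [104..107] at pblk 504.
		 */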
3237 if ((!ext4_ext_is_uninitialized(prev_ex)) && /*C1*/
3238 ((prev_lblk + prev_len) == ee_block) && /*C2*/
3239 ((prev_pblk + prev_len) == ee_pblk) && /*C3*/
3240 (prev_len < (EXT_INIT_MAX_LEN - write_len))) { /*C4*/
3241 			err = ext4_ext_get_access(handle, inode, path + depth);
3242 			if (err)
3243 				goto out;
3245 trace_ext4_ext_convert_to_initialized_fastpath(inode,
3248 /* Shift the start of ex by 'write_len' blocks */
3249 ex->ee_block = cpu_to_le32(ee_block + write_len);
3250 ext4_ext_store_pblock(ex, ee_pblk + write_len);
3251 ex->ee_len = cpu_to_le16(ee_len - write_len);
3252 ext4_ext_mark_uninitialized(ex); /* Restore the flag */
3254 /* Extend prev_ex by 'write_len' blocks */
3255 prev_ex->ee_len = cpu_to_le16(prev_len + write_len);
3257 /* Mark the block containing both extents as dirty */
3258 ext4_ext_dirty(handle, inode, path + depth);
3260 /* Update path to point to the right extent */
3261 path[depth].p_ext = prev_ex;
3263 /* Result: number of initialized blocks past m_lblk */
3264 			allocated = write_len;
3265 			goto out;
3266 		}
3267 	}
3269 WARN_ON(map->m_lblk < ee_block);
3271 * It is safe to convert extent to initialized via explicit
3272 	 * zeroout only if extent is fully inside i_size or new_size.
3274 split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3276 if (EXT4_EXT_MAY_ZEROOUT & split_flag)
3277 		max_zeroout = sbi->s_extent_max_zeroout_kb >>
3278 			(inode->i_sb->s_blocksize_bits - 10);
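	/*
	 * Sketch of the unit conversion above (illustrative numbers,
	 * assuming the default s_extent_max_zeroout_kb of 32): with a
	 * 4096-byte block size s_blocksize_bits == 12, so max_zeroout
	 * becomes 32 >> (12 - 10) == 8 blocks, i.e. 32KB worth of blocks.
	 */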
3280 	/* If extent is less than s_extent_max_zeroout_kb, zeroout directly */
3281 if (max_zeroout && (ee_len <= max_zeroout)) {
3282 		err = ext4_ext_zeroout(inode, ex);
3283 		if (err)
3284 			goto out;
3286 		err = ext4_ext_get_access(handle, inode, path + depth);
3287 		if (err)
3288 			goto out;
3289 		ext4_ext_mark_initialized(ex);
3290 		ext4_ext_try_to_merge(handle, inode, path, ex);
3291 		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3292 		goto out;
3293 	}
3295 	/*
3296 	 * four cases:
3297 	 * 1. split the extent into three extents.
3298 	 * 2. split the extent into two extents, zeroout the first half.
3299 	 * 3. split the extent into two extents, zeroout the second half.
3300 	 * 4. split the extent into two extents without zeroout.
3301 	 */
3302 split_map.m_lblk = map->m_lblk;
3303 split_map.m_len = map->m_len;
3305 if (max_zeroout && (allocated > map->m_len)) {
3306 if (allocated <= max_zeroout) {
3307 			/* case 3 */
3308 			zero_ex.ee_block =
3309 					 cpu_to_le32(map->m_lblk);
3310 			zero_ex.ee_len = cpu_to_le16(allocated);
3311 			ext4_ext_store_pblock(&zero_ex,
3312 				ext4_ext_pblock(ex) + map->m_lblk - ee_block);
3313 			err = ext4_ext_zeroout(inode, &zero_ex);
3314 			if (err)
3315 				goto out;
3316 			split_map.m_lblk = map->m_lblk;
3317 			split_map.m_len = allocated;
3318 		} else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
3319 			/* case 2 */
3320 			if (map->m_lblk != ee_block) {
3321 				zero_ex.ee_block = ex->ee_block;
3322 				zero_ex.ee_len = cpu_to_le16(map->m_lblk -
3323 							ee_block);
3324 				ext4_ext_store_pblock(&zero_ex,
3325 						      ext4_ext_pblock(ex));
3326 				err = ext4_ext_zeroout(inode, &zero_ex);
3327 				if (err)
3328 					goto out;
3329 			}
3331 			split_map.m_lblk = ee_block;
3332 			split_map.m_len = map->m_lblk - ee_block + map->m_len;
3333 			allocated = map->m_len;
3334 		}
3335 	}
3337 allocated = ext4_split_extent(handle, inode, path,
3338 &split_map, split_flag, 0);
3339 	if (allocated < 0)
3340 		err = allocated;
3342 out:
3343 	return err ? err : allocated;
3344 }
3347  * This function is called by ext4_ext_map_blocks() from
3348  * ext4_get_blocks_dio_write() when DIO writes
3349  * to an uninitialized extent.
3351 * Writing to an uninitialized extent may result in splitting the uninitialized
3352 * extent into multiple initialized/uninitialized extents (up to three)
3353 * There are three possibilities:
3354 * a> There is no split required: Entire extent should be uninitialized
3355 * b> Splits in two extents: Write is happening at either end of the extent
3356  *   c> Splits in three extents: Someone is writing in the middle of the extent
3358  * One or more index blocks may be needed if the extent tree grows after
3359  * the uninitialized extent is split. To prevent ENOSPC from occurring at
3360  * IO completion time, we split the uninitialized extent before submitting
3361  * the IO. The uninitialized extent will be split into (at most) three
3362  * uninitialized extents. After IO completes, the part that was filled
3363  * will be converted to initialized by the end_io callback function
3364  * via ext4_convert_unwritten_extents().
3366 * Returns the size of uninitialized extent to be written on success.
3368 static int ext4_split_unwritten_extents(handle_t *handle,
3369 struct inode *inode,
3370 struct ext4_map_blocks *map,
3371 struct ext4_ext_path *path,
3374 ext4_lblk_t eof_block;
3375 ext4_lblk_t ee_block;
3376 struct ext4_extent *ex;
3377 unsigned int ee_len;
3378 int split_flag = 0, depth;
3380 	ext_debug("ext4_split_unwritten_extents: inode %lu, logical "
3381 "block %llu, max_blocks %u\n", inode->i_ino,
3382 (unsigned long long)map->m_lblk, map->m_len);
3384 eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3385 inode->i_sb->s_blocksize_bits;
3386 if (eof_block < map->m_lblk + map->m_len)
3387 eof_block = map->m_lblk + map->m_len;
3389 * It is safe to convert extent to initialized via explicit
3390 	 * zeroout only if extent is fully inside i_size or new_size.
3392 depth = ext_depth(inode);
3393 ex = path[depth].p_ext;
3394 ee_block = le32_to_cpu(ex->ee_block);
3395 ee_len = ext4_ext_get_actual_len(ex);
3397 split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3398 split_flag |= EXT4_EXT_MARK_UNINIT2;
3399 if (flags & EXT4_GET_BLOCKS_CONVERT)
3400 split_flag |= EXT4_EXT_DATA_VALID2;
3401 flags |= EXT4_GET_BLOCKS_PRE_IO;
3402 return ext4_split_extent(handle, inode, path, map, split_flag, flags);
3405 static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3406 struct inode *inode,
3407 struct ext4_map_blocks *map,
3408 struct ext4_ext_path *path)
3410 struct ext4_extent *ex;
3411 ext4_lblk_t ee_block;
3412 	unsigned int ee_len;
3413 	int depth;
3414 	int err = 0;
3416 depth = ext_depth(inode);
3417 ex = path[depth].p_ext;
3418 ee_block = le32_to_cpu(ex->ee_block);
3419 ee_len = ext4_ext_get_actual_len(ex);
3421 	ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical "
3422 "block %llu, max_blocks %u\n", inode->i_ino,
3423 (unsigned long long)ee_block, ee_len);
3425 /* If extent is larger than requested then split is required */
3426 if (ee_block != map->m_lblk || ee_len > map->m_len) {
3427 err = ext4_split_unwritten_extents(handle, inode, map, path,
3428 						EXT4_GET_BLOCKS_CONVERT);
3429 		if (err < 0)
3430 			goto out;
3431 		ext4_ext_drop_refs(path);
3432 		path = ext4_ext_find_extent(inode, map->m_lblk, path);
3433 		if (IS_ERR(path)) {
3434 			err = PTR_ERR(path);
3435 			goto out;
3436 		}
3437 depth = ext_depth(inode);
3438 ex = path[depth].p_ext;
3441 	err = ext4_ext_get_access(handle, inode, path + depth);
3442 	if (err)
3443 		goto out;
3444 /* first mark the extent as initialized */
3445 ext4_ext_mark_initialized(ex);
3447 /* note: ext4_ext_correct_indexes() isn't needed here because
3448 * borders are not changed
3450 ext4_ext_try_to_merge(handle, inode, path, ex);
3452 /* Mark modified extent as dirty */
3453 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3454 out:
3455 	ext4_ext_show_leaf(inode, path);
3456 	return err;
3457 }
3459 static void unmap_underlying_metadata_blocks(struct block_device *bdev,
3460 sector_t block, int count)
3461 {
3462 	int i;
3463 	for (i = 0; i < count; i++)
3464 		unmap_underlying_metadata(bdev, block + i);
3465 }
3468 * Handle EOFBLOCKS_FL flag, clearing it if necessary
3470 static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
3471 			      ext4_lblk_t lblk,
3472 			      struct ext4_ext_path *path,
3473 			      unsigned int len)
3474 {
3475 	int i, depth;
3476 struct ext4_extent_header *eh;
3477 struct ext4_extent *last_ex;
3479 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
3480 		return 0;
3482 depth = ext_depth(inode);
3483 eh = path[depth].p_hdr;
3486 * We're going to remove EOFBLOCKS_FL entirely in future so we
3487 * do not care for this case anymore. Simply remove the flag
3488 * if there are no extents.
3490 	if (unlikely(!eh->eh_entries))
3491 		goto out;
3492 last_ex = EXT_LAST_EXTENT(eh);
3494 * We should clear the EOFBLOCKS_FL flag if we are writing the
3495 * last block in the last extent in the file. We test this by
3496 * first checking to see if the caller to
3497 * ext4_ext_get_blocks() was interested in the last block (or
3498 * a block beyond the last block) in the current extent. If
3499 * this turns out to be false, we can bail out from this
3500 * function immediately.
3502 if (lblk + len < le32_to_cpu(last_ex->ee_block) +
3503 	    ext4_ext_get_actual_len(last_ex))
3504 		return 0;
3506 * If the caller does appear to be planning to write at or
3507 * beyond the end of the current extent, we then test to see
3508 * if the current extent is the last extent in the file, by
3509 * checking to make sure it was reached via the rightmost node
3510 * at each level of the tree.
3512 for (i = depth-1; i >= 0; i--)
3513 		if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
3514 			return 0;
3515 out:
3516 ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3517 return ext4_mark_inode_dirty(handle, inode);
3521 * ext4_find_delalloc_range: find delayed allocated block in the given range.
3523 * Return 1 if there is a delalloc block in the range, otherwise 0.
3525 static int ext4_find_delalloc_range(struct inode *inode,
3526 ext4_lblk_t lblk_start,
3527 ext4_lblk_t lblk_end)
3529 struct extent_status es;
3531 ext4_es_find_delayed_extent(inode, lblk_start, &es);
3532 	if (es.es_len == 0)
3533 		return 0; /* there is no delay extent in this tree */
3534 	else if (es.es_lblk <= lblk_start &&
3535 		 lblk_start < es.es_lblk + es.es_len)
3536 		return 1;
3537 	else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end)
3538 		return 1;
3539 	else
3540 		return 0;
3541 }
3543 int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
3545 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3546 ext4_lblk_t lblk_start, lblk_end;
3547 lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
3548 lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
3550 return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
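/*
 * Sketch (illustrative numbers): with s_cluster_ratio == 8 and lblk == 21,
 * lblk_start == 21 & ~7 == 16 and lblk_end == 23, so the whole cluster
 * containing block 21 is searched for delayed extents.
 */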
3554  * Determines how many complete clusters (out of those specified by the 'map')
3555  * are under delalloc and had quota reserved for them.
3556 * This function is called when we are writing out the blocks that were
3557 * originally written with their allocation delayed, but then the space was
3558 * allocated using fallocate() before the delayed allocation could be resolved.
3559 * The cases to look for are:
3560  * ('=' indicates delayed allocated blocks
3561 * '-' indicates non-delayed allocated blocks)
3562 * (a) partial clusters towards beginning and/or end outside of allocated range
3563 * are not delalloc'ed.
3565 * |----c---=|====c====|====c====|===-c----|
3566 * |++++++ allocated ++++++|
3567 * ==> 4 complete clusters in above example
3569 * (b) partial cluster (outside of allocated range) towards either end is
3570  *	marked for delayed allocation. In this case, we will exclude that
3571  *	partial cluster.
3573 * |----====c========|========c========|
3574 * |++++++ allocated ++++++|
3575  *	==> 1 complete cluster in above example
3578 * |================c================|
3579 * |++++++ allocated ++++++|
3580 * ==> 0 complete clusters in above example
3582 * The ext4_da_update_reserve_space will be called only if we
3583 * determine here that there were some "entire" clusters that span
3584 * this 'allocated' range.
3585 * In the non-bigalloc case, this function will just end up returning num_blks
3586 * without ever calling ext4_find_delalloc_range.
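 *
 * Worked example (hypothetical numbers): with s_cluster_ratio == 4,
 * lblk_start == 5 and num_blks == 13 (blocks 5..17), the range touches
 * clusters 1..4, so allocated_clusters starts at 4; the partial left
 * edge then checks block 4 and the partial right edge blocks 18..19
 * for delalloc, decrementing the count for each edge found delalloc'ed.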
3589 get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
3590 unsigned int num_blks)
3592 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3593 ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
3594 ext4_lblk_t lblk_from, lblk_to, c_offset;
3595 unsigned int allocated_clusters = 0;
3597 alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
3598 alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);
3600 /* max possible clusters for this allocation */
3601 allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
3603 trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
3605 /* Check towards left side */
3606 c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
3607 	if (c_offset) {
3608 		lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
3609 		lblk_to = lblk_from + c_offset - 1;
3611 		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
3612 			allocated_clusters--;
3613 	}
3615 /* Now check towards right. */
3616 c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
3617 if (allocated_clusters && c_offset) {
3618 lblk_from = lblk_start + num_blks;
3619 lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
3621 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
3622 			allocated_clusters--;
3623 	}
3625 return allocated_clusters;
3629 ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3630 struct ext4_map_blocks *map,
3631 struct ext4_ext_path *path, int flags,
3632 unsigned int allocated, ext4_fsblk_t newblock)
3634 	int ret = 0;
3635 	int err = 0;
3636 	ext4_io_end_t *io = ext4_inode_aio(inode);
3638 ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
3639 "block %llu, max_blocks %u, flags %x, allocated %u\n",
3640 		  inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
3641 		  flags, allocated);
3642 ext4_ext_show_leaf(inode, path);
3644 trace_ext4_ext_handle_uninitialized_extents(inode, map, flags,
3645 allocated, newblock);
3647 /* get_block() before submit the IO, split the extent */
3648 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3649 		ret = ext4_split_unwritten_extents(handle, inode, map,
3650 						   path, flags);
3651 		if (ret <= 0)
3652 			goto out;
3653 		/*
3654 		 * Flag the inode (non-aio case) or end_io struct (aio case)
3655 		 * that this IO needs conversion to written when IO is
3656 		 * completed
3657 		 */
3658 		if (io)
3659 			ext4_set_io_unwritten_flag(inode, io);
3660 		else
3661 			ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3662 if (ext4_should_dioread_nolock(inode))
3663 map->m_flags |= EXT4_MAP_UNINIT;
3666 /* IO end_io complete, convert the filled extent to written */
3667 if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
3668 		ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
3669 							path);
3670 		if (ret >= 0) {
3671 			ext4_update_inode_fsync_trans(handle, inode, 1);
3672 			err = check_eofblocks_fl(handle, inode, map->m_lblk,
3673 						 path, map->m_len);
3674 		} else
3675 			err = ret;
3676 		goto out2;
3677 	}
3678 /* buffered IO case */
3680 * repeat fallocate creation request
3681 * we already have an unwritten extent
3683 	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
3684 		goto map_out;
3686 /* buffered READ or buffered write_begin() lookup */
3687 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3689 * We have blocks reserved already. We
3690 * return allocated blocks so that delalloc
3691 * won't do block reservation for us. But
3692 * the buffer head will be unmapped so that
3693 * a read from the block returns 0s.
3695 		map->m_flags |= EXT4_MAP_UNWRITTEN;
3696 		goto out1;
3697 	}
3699 /* buffered write, writepage time, convert*/
3700 ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
3701 	if (ret >= 0)
3702 		ext4_update_inode_fsync_trans(handle, inode, 1);
3703 out:
3704 	if (ret <= 0) {
3705 		err = ret;
3706 		goto out2;
3707 	} else
3708 		allocated = ret;
3709 	map->m_flags |= EXT4_MAP_NEW;
3711 * if we allocated more blocks than requested
3712 * we need to make sure we unmap the extra block
3713 * allocated. The actual needed block will get
3714 * unmapped later when we find the buffer_head marked
3717 if (allocated > map->m_len) {
3718 unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
3719 newblock + map->m_len,
3720 allocated - map->m_len);
3721 allocated = map->m_len;
3725 * If we have done fallocate with the offset that is already
3726 * delayed allocated, we would have block reservation
3727 * and quota reservation done in the delayed write path.
3728 * But fallocate would have already updated quota and block
3729 * count for this offset. So cancel these reservation
3731 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
3732 unsigned int reserved_clusters;
3733 reserved_clusters = get_reserved_cluster_alloc(inode,
3734 map->m_lblk, map->m_len);
3735 if (reserved_clusters)
3736 			ext4_da_update_reserve_space(inode,
3737 						     reserved_clusters, 0);
3738 	}
3741 map_out:
3742 	map->m_flags |= EXT4_MAP_MAPPED;
3743 if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
3744 err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
3749 out1:
3750 	if (allocated > map->m_len)
3751 allocated = map->m_len;
3752 ext4_ext_show_leaf(inode, path);
3753 map->m_pblk = newblock;
3754 map->m_len = allocated;
3756 out2:
3757 	ext4_ext_drop_refs(path);
3758 	kfree(path);
3760 return err ? err : allocated;
3764 * get_implied_cluster_alloc - check to see if the requested
3765 * allocation (in the map structure) overlaps with a cluster already
3766 * allocated in an extent.
3767 * @sb The filesystem superblock structure
3768 * @map The requested lblk->pblk mapping
3769 * @ex The extent structure which might contain an implied
3770 * cluster allocation
3772 * This function is called by ext4_ext_map_blocks() after we failed to
3773 * find blocks that were already in the inode's extent tree. Hence,
3774 * we know that the beginning of the requested region cannot overlap
3775 * the extent from the inode's extent tree. There are three cases we
3776 * want to catch. The first is this case:
3778 * |--- cluster # N--|
3779 * |--- extent ---| |---- requested region ---|
3782 * The second case that we need to test for is this one:
3784 * |--------- cluster # N ----------------|
3785 * |--- requested region --| |------- extent ----|
3786 * |=======================|
3788 * The third case is when the requested region lies between two extents
3789 * within the same cluster:
3790 * |------------- cluster # N-------------|
3791 * |----- ex -----| |---- ex_right ----|
3792 * |------ requested region ------|
3793 * |================|
3795 * In each of the above cases, we need to set the map->m_pblk and
3796  * map->m_len so they correspond to the extent labelled as
3797 * "|====|" from cluster #N, since it is already in use for data in
3798 * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to
3799 * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
3800 * as a new "allocated" block region. Otherwise, we will return 0 and
3801 * ext4_ext_map_blocks() will then allocate one or more new clusters
3802 * by calling ext4_mb_new_blocks().
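 *
 * Worked example (hypothetical numbers): with s_cluster_ratio == 8, an
 * extent covering blocks [8..9] at pblk 264 and a request for
 * map->m_lblk == 12, both fall in logical cluster 1, so map->m_pblk
 * becomes (264 & ~7) + (12 & 7) == 268 and the mapping is reported as
 * implied by the already-allocated cluster (assuming no other extent
 * sits between block 12 and the end of the cluster).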
3804 static int get_implied_cluster_alloc(struct super_block *sb,
3805 struct ext4_map_blocks *map,
3806 struct ext4_extent *ex,
3807 struct ext4_ext_path *path)
3809 struct ext4_sb_info *sbi = EXT4_SB(sb);
3810 ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
3811 ext4_lblk_t ex_cluster_start, ex_cluster_end;
3812 ext4_lblk_t rr_cluster_start;
3813 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3814 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
3815 unsigned short ee_len = ext4_ext_get_actual_len(ex);
3817 /* The extent passed in that we are trying to match */
3818 ex_cluster_start = EXT4_B2C(sbi, ee_block);
3819 ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
3821 /* The requested region passed into ext4_map_blocks() */
3822 rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
3824 if ((rr_cluster_start == ex_cluster_end) ||
3825 (rr_cluster_start == ex_cluster_start)) {
3826 if (rr_cluster_start == ex_cluster_end)
3827 ee_start += ee_len - 1;
3828 map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
3830 map->m_len = min(map->m_len,
3831 (unsigned) sbi->s_cluster_ratio - c_offset);
3833 * Check for and handle this case:
3835 * |--------- cluster # N-------------|
3836 * |------- extent ----|
3837 * |--- requested region ---|
3841 if (map->m_lblk < ee_block)
3842 map->m_len = min(map->m_len, ee_block - map->m_lblk);
3845 * Check for the case where there is already another allocated
3846 * block to the right of 'ex' but before the end of the cluster.
3848 * |------------- cluster # N-------------|
3849 * |----- ex -----| |---- ex_right ----|
3850 * |------ requested region ------|
3851 * |================|
3853 if (map->m_lblk > ee_block) {
3854 ext4_lblk_t next = ext4_ext_next_allocated_block(path);
3855 map->m_len = min(map->m_len, next - map->m_lblk);
3858 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
3862 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
3868 * Block allocation/map/preallocation routine for extents based files
3871 * Need to be called with
3872 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
3873  *	(i.e., create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
3875  * return > 0, number of blocks already mapped/allocated
3876 * if create == 0 and these are pre-allocated blocks
3877 * buffer head is unmapped
3878 * otherwise blocks are mapped
3880 * return = 0, if plain look up failed (blocks have not been allocated)
3881 * buffer head is unmapped
3883 * return < 0, error case.
3885 int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
3886 struct ext4_map_blocks *map, int flags)
3888 struct ext4_ext_path *path = NULL;
3889 struct ext4_extent newex, *ex, *ex2;
3890 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3891 ext4_fsblk_t newblock = 0;
3892 int free_on_err = 0, err = 0, depth;
3893 unsigned int allocated = 0, offset = 0;
3894 unsigned int allocated_clusters = 0;
3895 struct ext4_allocation_request ar;
3896 ext4_io_end_t *io = ext4_inode_aio(inode);
3897 ext4_lblk_t cluster_offset;
3898 int set_unwritten = 0;
3900 ext_debug("blocks %u/%u requested for inode %lu\n",
3901 map->m_lblk, map->m_len, inode->i_ino);
3902 trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
3904 /* check in cache */
3905 if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
3906 if (!newex.ee_start_lo && !newex.ee_start_hi) {
3907 if ((sbi->s_cluster_ratio > 1) &&
3908 ext4_find_delalloc_cluster(inode, map->m_lblk))
3909 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3911 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3913 * block isn't allocated yet and
3914 				 * user doesn't want to allocate it
3915 				 */
3916 				goto out2;
3917 			}
3918 			/* we should allocate requested block */
3919 		} else {
3920 /* block is already allocated */
3921 if (sbi->s_cluster_ratio > 1)
3922 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3923 newblock = map->m_lblk
3924 - le32_to_cpu(newex.ee_block)
3925 + ext4_ext_pblock(&newex);
3926 /* number of remaining blocks in the extent */
3927 allocated = ext4_ext_get_actual_len(&newex) -
3928 				(map->m_lblk - le32_to_cpu(newex.ee_block));
3929 			goto out;
3930 		}
3931 	}
3933 /* find extent for this block */
3934 path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
3935 	if (IS_ERR(path)) {
3936 		err = PTR_ERR(path);
3937 		path = NULL;
3938 		goto out2;
3939 	}
3941 depth = ext_depth(inode);
3944 * consistent leaf must not be empty;
3945 * this situation is possible, though, _during_ tree modification;
3946 * this is why assert can't be put in ext4_ext_find_extent()
3948 if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
3949 EXT4_ERROR_INODE(inode, "bad extent address "
3950 "lblock: %lu, depth: %d pblock %lld",
3951 (unsigned long) map->m_lblk, depth,
3952 				 path[depth].p_block);
3953 		err = -EIO;
3954 		goto out2;
3955 	}
3957 ex = path[depth].p_ext;
3958 	if (ex) {
3959 		ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3960 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
3961 unsigned short ee_len;
3964 * Uninitialized extents are treated as holes, except that
3965 * we split out initialized portions during a write.
3967 ee_len = ext4_ext_get_actual_len(ex);
3969 trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
3971 /* if found extent covers block, simply return it */
3972 if (in_range(map->m_lblk, ee_block, ee_len)) {
3973 newblock = map->m_lblk - ee_block + ee_start;
3974 /* number of remaining blocks in the extent */
3975 allocated = ee_len - (map->m_lblk - ee_block);
3976 ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
3977 ee_block, ee_len, newblock);
3980 			 * Do not put uninitialized extent
3981 			 * in the cache
3982 			 */
3983 if (!ext4_ext_is_uninitialized(ex)) {
3984 				ext4_ext_put_in_cache(inode, ee_block,
3985 						      ee_len, ee_start);
3986 				goto out;
3987 			}
3988 allocated = ext4_ext_handle_uninitialized_extents(
3989 handle, inode, map, path, flags,
3990 allocated, newblock);
3995 if ((sbi->s_cluster_ratio > 1) &&
3996 ext4_find_delalloc_cluster(inode, map->m_lblk))
3997 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4000 * requested block isn't allocated yet;
4001 	 * we cannot try to create a block if the create flag is zero
4003 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
4005 * put just found gap into cache to speed up
4006 * subsequent requests
4008 		ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
4009 		goto out2;
4010 	}
4013 * Okay, we need to do block allocation.
4015 map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
4016 newex.ee_block = cpu_to_le32(map->m_lblk);
4017 cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
4020 * If we are doing bigalloc, check to see if the extent returned
4021 * by ext4_ext_find_extent() implies a cluster we can use.
4023 if (cluster_offset && ex &&
4024 get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
4025 ar.len = allocated = map->m_len;
4026 newblock = map->m_pblk;
4027 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4028 goto got_allocated_blocks;
4031 /* find neighbour allocated blocks */
4032 ar.lleft = map->m_lblk;
4033 	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
4034 	if (err)
4035 		goto out2;
4036 ar.lright = map->m_lblk;
4038 	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
4039 	if (err)
4040 		goto out2;
4042 /* Check if the extent after searching to the right implies a
4043 * cluster we can use. */
4044 if ((sbi->s_cluster_ratio > 1) && ex2 &&
4045 get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
4046 ar.len = allocated = map->m_len;
4047 newblock = map->m_pblk;
4048 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4049 goto got_allocated_blocks;
4053 * See if request is beyond maximum number of blocks we can have in
4054 * a single extent. For an initialized extent this limit is
4055 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
4056 * EXT_UNINIT_MAX_LEN.
4058 if (map->m_len > EXT_INIT_MAX_LEN &&
4059 !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
4060 map->m_len = EXT_INIT_MAX_LEN;
4061 else if (map->m_len > EXT_UNINIT_MAX_LEN &&
4062 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
4063 map->m_len = EXT_UNINIT_MAX_LEN;
4065 /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
4066 newex.ee_len = cpu_to_le16(map->m_len);
4067 	err = ext4_ext_check_overlap(sbi, inode, &newex, path);
4068 	if (err)
4069 		allocated = ext4_ext_get_actual_len(&newex);
4070 	else
4071 		allocated = map->m_len;
4073 /* allocate new block */
4075 ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4076 ar.logical = map->m_lblk;
4078 * We calculate the offset from the beginning of the cluster
4079 * for the logical block number, since when we allocate a
4080 * physical cluster, the physical block should start at the
4081 * same offset from the beginning of the cluster. This is
4082 	 * needed so that future calls to get_implied_cluster_alloc()
4083 	 * work correctly.
4084 	 */
4085 offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
4086 ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
4087 	ar.goal -= offset;
4088 	ar.logical -= offset;
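	/*
	 * Cluster-alignment sketch (illustrative numbers, bigalloc with
	 * s_cluster_ratio == 8): for map->m_lblk == 13, offset == 13 & 7
	 * == 5, so the request is pulled back to logical block 8; with
	 * allocated == 4, ar.len = EXT4_NUM_B2C(sbi, 5 + 4) == 2 clusters,
	 * keeping the physical offset within the cluster equal to the
	 * logical one.
	 */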
4089 if (S_ISREG(inode->i_mode))
4090 ar.flags = EXT4_MB_HINT_DATA;
4091 	else
4092 		/* disable in-core preallocation for non-regular files */
4093 		ar.flags = 0;
4094 if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4095 ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4096 	newblock = ext4_mb_new_blocks(handle, &ar, &err);
4097 	if (!newblock)
4098 		goto out2;
4099 ext_debug("allocate new block: goal %llu, found %llu/%u\n",
4100 ar.goal, newblock, allocated);
4102 allocated_clusters = ar.len;
4103 ar.len = EXT4_C2B(sbi, ar.len) - offset;
4104 	if (ar.len > allocated)
4105 		ar.len = allocated;
4107 got_allocated_blocks:
4108 /* try to insert new extent into found leaf and return */
4109 ext4_ext_store_pblock(&newex, newblock + offset);
4110 newex.ee_len = cpu_to_le16(ar.len);
4111 /* Mark uninitialized */
4112 if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){
4113 ext4_ext_mark_uninitialized(&newex);
4115 * io_end structure was created for every IO write to an
4116 * uninitialized extent. To avoid unnecessary conversion,
4117 * here we flag the IO that really needs the conversion.
4118 		 * For the non-async direct IO case, flag the inode state
4119 * that we need to perform conversion when IO is done.
4121 		if ((flags & EXT4_GET_BLOCKS_PRE_IO))
4122 			set_unwritten = 1;
4123 if (ext4_should_dioread_nolock(inode))
4124 map->m_flags |= EXT4_MAP_UNINIT;
4128 if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
4129 		err = check_eofblocks_fl(handle, inode, map->m_lblk,
4130 					 path, ar.len);
4131 	if (!err)
4132 		err = ext4_ext_insert_extent(handle, inode, path,
4133 					     &newex, flags);
4135 if (!err && set_unwritten) {
4136 		if (io)
4137 			ext4_set_io_unwritten_flag(inode, io);
4138 		else
4139 ext4_set_inode_state(inode,
4140 EXT4_STATE_DIO_UNWRITTEN);
4143 if (err && free_on_err) {
4144 int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
4145 EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
4146 /* free data blocks we just allocated */
4147 /* not a good idea to call discard here directly,
4148 * but otherwise we'd need to call it every free() */
4149 ext4_discard_preallocations(inode);
4150 ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
4151 				 ext4_ext_get_actual_len(&newex), fb_flags);
4152 		goto out2;
4153 	}
4155 /* previous routine could use block we allocated */
4156 newblock = ext4_ext_pblock(&newex);
4157 allocated = ext4_ext_get_actual_len(&newex);
4158 if (allocated > map->m_len)
4159 allocated = map->m_len;
4160 map->m_flags |= EXT4_MAP_NEW;
4163 * Update reserved blocks/metadata blocks after successful
4164 * block allocation which had been deferred till now.
4166 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
4167 unsigned int reserved_clusters;
4169 * Check how many clusters we had reserved this allocated range
4171 reserved_clusters = get_reserved_cluster_alloc(inode,
4172 map->m_lblk, allocated);
4173 if (map->m_flags & EXT4_MAP_FROM_CLUSTER) {
4174 if (reserved_clusters) {
4176 * We have clusters reserved for this range.
4177 * But since we are not doing actual allocation
4178 * and are simply using blocks from previously
4179 * allocated cluster, we should release the
4180 * reservation and not claim quota.
4182 ext4_da_update_reserve_space(inode,
4183 reserved_clusters, 0);
4186 BUG_ON(allocated_clusters < reserved_clusters);
4187 /* We will claim quota for all newly allocated blocks.*/
4188 			ext4_da_update_reserve_space(inode, allocated_clusters,
4189 						     1);
4190 if (reserved_clusters < allocated_clusters) {
4191 struct ext4_inode_info *ei = EXT4_I(inode);
4192 				int reservation = allocated_clusters -
4193 						  reserved_clusters;
4194 				/*
4195 				 * It seems we claimed a few clusters outside of
4196 * the range of this allocation. We should give
4197 * it back to the reservation pool. This can
4198 * happen in the following case:
4200 * * Suppose s_cluster_ratio is 4 (i.e., each
4201 				 *   cluster has 4 blocks). Thus, the clusters
4202 * are [0-3],[4-7],[8-11]...
4203 * * First comes delayed allocation write for
4204 * logical blocks 10 & 11. Since there were no
4205 * previous delayed allocated blocks in the
4206 * range [8-11], we would reserve 1 cluster
4208 * * Next comes write for logical blocks 3 to 8.
4209 * In this case, we will reserve 2 clusters
4210 * (for [0-3] and [4-7]; and not for [8-11] as
4211 				 *   that range has delayed allocated blocks.
4212 * Thus total reserved clusters now becomes 3.
4213 * * Now, during the delayed allocation writeout
4214 * time, we will first write blocks [3-8] and
4215 * allocate 3 clusters for writing these
4216 * blocks. Also, we would claim all these
4217 * three clusters above.
4218 * * Now when we come here to writeout the
4219 * blocks [10-11], we would expect to claim
4220 * the reservation of 1 cluster we had made
4221 * (and we would claim it since there are no
4222 * more delayed allocated blocks in the range
4223 				 *   [8-11]). But our reserved cluster count had
4224 * already gone to 0.
4226 * Thus, at the step 4 above when we determine
4227 * that there are still some unwritten delayed
4228 * allocated blocks outside of our current
4229 * block range, we should increment the
4230 * reserved clusters count so that when the
4231 				 * remaining blocks finally get written, we
4232 				 * could claim them.
4233 				 */
4234 dquot_reserve_block(inode,
4235 EXT4_C2B(sbi, reservation));
4236 spin_lock(&ei->i_block_reservation_lock);
4237 ei->i_reserved_data_blocks += reservation;
4238 spin_unlock(&ei->i_block_reservation_lock);
4244 * Cache the extent and update transaction to commit on fdatasync only
4245 * when it is _not_ an uninitialized extent.
4247 if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
4248 ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
4249 ext4_update_inode_fsync_trans(handle, inode, 1);
4250 	} else
4251 		ext4_update_inode_fsync_trans(handle, inode, 0);
4252 out:
4253 	if (allocated > map->m_len)
4254 allocated = map->m_len;
4255 ext4_ext_show_leaf(inode, path);
4256 map->m_flags |= EXT4_MAP_MAPPED;
4257 map->m_pblk = newblock;
4258 map->m_len = allocated;
4260 out2:
4261 	ext4_ext_drop_refs(path);
4262 	kfree(path);
4266 trace_ext4_ext_map_blocks_exit(inode, map, err ? err : allocated);
4268 return err ? err : allocated;
4271 void ext4_ext_truncate(struct inode *inode)
4273 struct address_space *mapping = inode->i_mapping;
4274 struct super_block *sb = inode->i_sb;
4275 	ext4_lblk_t last_block;
4276 	handle_t *handle;
4277 	loff_t page_len;
4278 	int err = 0;
4280 	/*
4281 	 * finish any pending end_io work so we won't run the risk of
4282 * converting any truncated blocks to initialized later
4284 ext4_flush_unwritten_io(inode);
4287 * probably first extent we're gonna free will be last in block
4289 err = ext4_writepage_trans_blocks(inode);
4290 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, err);
4291 	if (IS_ERR(handle))
4292 		return;
4294 if (inode->i_size % PAGE_CACHE_SIZE != 0) {
4295 page_len = PAGE_CACHE_SIZE -
4296 (inode->i_size & (PAGE_CACHE_SIZE - 1));
4298 err = ext4_discard_partial_page_buffers(handle,
4299 mapping, inode->i_size, page_len, 0);
4305 	if (ext4_orphan_add(handle, inode))
4306 		goto out_stop;
4308 down_write(&EXT4_I(inode)->i_data_sem);
4309 ext4_ext_invalidate_cache(inode);
4311 ext4_discard_preallocations(inode);
4314 * TODO: optimization is possible here.
4315 * Probably we need not scan at all,
4316 * because page truncation is enough.
4319 /* we have to know where to truncate from in crash case */
4320 EXT4_I(inode)->i_disksize = inode->i_size;
4321 ext4_mark_inode_dirty(handle, inode);
4323 last_block = (inode->i_size + sb->s_blocksize - 1)
4324 >> EXT4_BLOCK_SIZE_BITS(sb);
4325 err = ext4_es_remove_extent(inode, last_block,
4326 EXT_MAX_BLOCKS - last_block);
4327 err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
4329 /* In a multi-transaction truncate, we only make the final
4330 * transaction synchronous.
4332 	if (IS_SYNC(inode))
4333 		ext4_handle_sync(handle);
4335 	up_write(&EXT4_I(inode)->i_data_sem);
4337 out_stop:
4338 	/*
4339 * If this was a simple ftruncate() and the file will remain alive,
4340 * then we need to clear up the orphan record which we created above.
4341 * However, if this was a real unlink then we were called by
4342 * ext4_delete_inode(), and we allow that function to clean up the
4343 * orphan info for us.
4345 	if (inode->i_nlink)
4346 		ext4_orphan_del(handle, inode);
4348 inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4349 ext4_mark_inode_dirty(handle, inode);
4350 ext4_journal_stop(handle);
4353 static void ext4_falloc_update_inode(struct inode *inode,
4354 int mode, loff_t new_size, int update_ctime)
4356 struct timespec now;
4358 	if (update_ctime) {
4359 		now = current_fs_time(inode->i_sb);
4360 		if (!timespec_equal(&inode->i_ctime, &now))
4361 			inode->i_ctime = now;
4362 	}
4364 	 * Update only when preallocation was requested beyond
4365 	 * the file size.
4366 	 */
4367 if (!(mode & FALLOC_FL_KEEP_SIZE)) {
4368 if (new_size > i_size_read(inode))
4369 i_size_write(inode, new_size);
4370 if (new_size > EXT4_I(inode)->i_disksize)
4371 ext4_update_i_disksize(inode, new_size);
4374 * Mark that we allocate beyond EOF so the subsequent truncate
4375 * can proceed even if the new size is the same as i_size.
4377 if (new_size > i_size_read(inode))
4378 ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4384 * preallocate space for a file. This implements ext4's fallocate file
4385 * operation, which gets called from sys_fallocate system call.
4386 * For block-mapped files, posix_fallocate should fall back to the method
4387 * of writing zeroes to the required new blocks (the same behavior which is
4388 * expected for file systems which do not support fallocate() system call).
4390 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4392 struct inode *inode = file->f_path.dentry->d_inode;
4395 unsigned int max_blocks;
4400 struct ext4_map_blocks map;
4401 unsigned int credits, blkbits = inode->i_blkbits;
4403 /* Return error if mode is not supported */
4404 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
4407 if (mode & FALLOC_FL_PUNCH_HOLE)
4408 return ext4_punch_hole(file, offset, len);
4410 	ret = ext4_convert_inline_data(inode);
4411 	if (ret)
4412 		return ret;
4415 	 * currently supporting (pre)allocate mode for extent-based
4416 	 * files _only_
4417 	 */
4418 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4419 		return -EOPNOTSUPP;
4421 trace_ext4_fallocate_enter(inode, offset, len, mode);
4422 map.m_lblk = offset >> blkbits;
4423 	/*
4424 	 * We can't just convert len to max_blocks because, e.g., with
4425 	 * blocksize = 4096, offset = 3072 and len = 2048 the range spans two blocks.
4426 	 */
4427 	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
4428 		- map.m_lblk;
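	/*
	 * Worked example (illustrative numbers): with blkbits == 12,
	 * offset == 3072 and len == 2048, map.m_lblk == 0 while
	 * EXT4_BLOCK_ALIGN(5120, 12) >> 12 == 2, so max_blocks == 2 even
	 * though len alone is smaller than one block.
	 */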
4430 * credits to insert 1 extent into extent tree
4432 credits = ext4_chunk_trans_blocks(inode, max_blocks);
4433 mutex_lock(&inode->i_mutex);
4434 ret = inode_newsize_ok(inode, (len + offset));
4436 mutex_unlock(&inode->i_mutex);
4437 trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
4440 flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT;
4441 if (mode & FALLOC_FL_KEEP_SIZE)
4442 flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4444 * Don't normalize the request if it can fit in one extent so
4445 * that it doesn't get unnecessarily split into multiple
4448 if (len <= EXT_UNINIT_MAX_LEN << blkbits)
4449 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
4451 /* Prevent race condition between unwritten */
4452 ext4_flush_unwritten_io(inode);
4454 while (ret >= 0 && ret < max_blocks) {
4455 map.m_lblk = map.m_lblk + ret;
4456 map.m_len = max_blocks = max_blocks - ret;
4457 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
4459 if (IS_ERR(handle)) {
4460 ret = PTR_ERR(handle);
4463 ret = ext4_map_blocks(handle, inode, &map, flags);
4466 ext4_warning(inode->i_sb,
4467 "inode #%lu: block %u: len %u: "
4468 "ext4_ext_map_blocks returned %d",
4469 inode->i_ino, map.m_lblk,
4472 ext4_mark_inode_dirty(handle, inode);
4473 ret2 = ext4_journal_stop(handle);
4476 if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
4477 blkbits) >> blkbits))
4478 new_size = offset + len;
4480 new_size = ((loff_t) map.m_lblk + ret) << blkbits;
4482 ext4_falloc_update_inode(inode, mode, new_size,
4483 (map.m_flags & EXT4_MAP_NEW));
4484 ext4_mark_inode_dirty(handle, inode);
4485 if ((file->f_flags & O_SYNC) && ret >= max_blocks)
4486 ext4_handle_sync(handle);
4487 ret2 = ext4_journal_stop(handle);
4491 if (ret == -ENOSPC &&
4492 ext4_should_retry_alloc(inode->i_sb, &retries)) {
4496 mutex_unlock(&inode->i_mutex);
4497 trace_ext4_fallocate_exit(inode, offset, max_blocks,
4498 ret > 0 ? ret2 : ret);
4499 return ret > 0 ? ret2 : ret;
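
/*
 * Usage sketch (illustrative): the allocation loop above is what
 * services calls such as
 *
 *	int fd = open("data", O_RDWR | O_CREAT, 0644);
 *	fallocate(fd, 0, 0, 1 << 20);
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 1 << 20, 1 << 20);
 *
 * Each iteration maps up to max_blocks as uninitialized extents in its
 * own transaction, so an -ENOSPC retry resumes where the loop left off.
 */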
/*
 * This function converts a range of blocks to written extents.
 * The caller of this function will pass the start offset and the size;
 * all unwritten extents within this range will be converted to
 * written extents.
 *
 * This function is called from the direct IO end io callback
 * function, to convert the fallocated extents after IO is completed.
 * Returns 0 on success.
 */
int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
				   ssize_t len)
{
	handle_t *handle;
	unsigned int max_blocks;
	int ret = 0;
	int ret2 = 0;
	struct ext4_map_blocks map;
	unsigned int credits, blkbits = inode->i_blkbits;

	map.m_lblk = offset >> blkbits;
	/*
	 * We can't just convert len to max_blocks because len need not be
	 * block aligned: e.g. with blocksize = 4096, offset = 3072 and
	 * len = 2048 the request spans two blocks even though
	 * len >> blkbits == 0.
	 */
	max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
		      map.m_lblk);
	/*
	 * credits to insert 1 extent into extent tree
	 */
	credits = ext4_chunk_trans_blocks(inode, max_blocks);
	while (ret >= 0 && ret < max_blocks) {
		map.m_lblk += ret;
		map.m_len = (max_blocks -= ret);
		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			break;
		}
		ret = ext4_map_blocks(handle, inode, &map,
				      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
		if (ret <= 0)
			ext4_warning(inode->i_sb,
				     "inode #%lu: block %u: len %u: "
				     "ext4_ext_map_blocks returned %d",
				     inode->i_ino, map.m_lblk,
				     map.m_len, ret);
		ext4_mark_inode_dirty(handle, inode);
		ret2 = ext4_journal_stop(handle);
		if (ret <= 0 || ret2)
			break;
	}
	return ret > 0 ? ret2 : ret;
}
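
/*
 * Illustrative path (a sketch; the helper names on the completion side
 * vary by kernel version): an O_DIRECT write into fallocated space
 * completes roughly as
 *
 *	dio completion -> ext4 end-io worker
 *	  -> ext4_convert_unwritten_extents(inode, offset, size)
 *
 * flipping the covered extents to written so that subsequent reads
 * return the data instead of zeroes.
 */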
/*
 * If newex is not an existing extent (newex->ec_start equals zero),
 * find the delayed extent at the start of newex, update newex
 * accordingly and return the start of the next delayed extent.
 *
 * If newex is an existing extent (newex->ec_start is not zero), return
 * the start of the next delayed extent or EXT_MAX_BLOCKS if no delayed
 * extent is found.  Leave newex unmodified.
 */
static int ext4_find_delayed_extent(struct inode *inode,
				    struct ext4_ext_cache *newex)
{
	struct extent_status es;
	ext4_lblk_t block, next_del;

	ext4_es_find_delayed_extent(inode, newex->ec_block, &es);
	if (newex->ec_start == 0) {
		/*
		 * No extent in the extent tree contains block
		 * @newex->ec_block, so the block may sit in either a hole
		 * or a delayed extent.
		 */
		if (es.es_len == 0)
			/* A hole found. */
			return 0;
		if (es.es_lblk > newex->ec_block) {
			/* A hole found. */
			newex->ec_len = min(es.es_lblk - newex->ec_block,
					    newex->ec_len);
			return 0;
		}
		newex->ec_len = es.es_lblk + es.es_len - newex->ec_block;
	}

	block = newex->ec_block + newex->ec_len;
	ext4_es_find_delayed_extent(inode, block, &es);
	if (es.es_len == 0)
		next_del = EXT_MAX_BLOCKS;
	else
		next_del = es.es_lblk;

	return next_del;
}
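
/*
 * Example (sketch): suppose blocks 0..3 of a file are written and
 * blocks 4..7 are delayed-allocated.  A fiemap lookup for block 4
 * arrives with newex->ec_start == 0; the delayed extent [4, 8) is
 * found in the extent status tree, newex->ec_len becomes 4, and the
 * search from block 8 finds no further delayed extent, so
 * EXT_MAX_BLOCKS is returned as next_del.
 */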
/* fiemap flags we can handle specified here */
#define EXT4_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)

static int ext4_xattr_fiemap(struct inode *inode,
			     struct fiemap_extent_info *fieinfo)
{
	__u64 physical = 0;
	__u64 length;
	__u32 flags = FIEMAP_EXTENT_LAST;
	int blockbits = inode->i_sb->s_blocksize_bits;
	int error = 0;

	/* in-inode? */
	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
		struct ext4_iloc iloc;
		int offset;	/* offset of xattr in inode */

		error = ext4_get_inode_loc(inode, &iloc);
		if (error)
			return error;
		physical = iloc.bh->b_blocknr << blockbits;
		offset = EXT4_GOOD_OLD_INODE_SIZE +
				EXT4_I(inode)->i_extra_isize;
		physical += offset;
		length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		brelse(iloc.bh);
	} else { /* external block */
		physical = EXT4_I(inode)->i_file_acl << blockbits;
		length = inode->i_sb->s_blocksize;
	}

	if (physical)
		error = fiemap_fill_next_extent(fieinfo, 0, physical,
						length, flags);
	return (error < 0 ? error : 0);
}
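
/*
 * Usage sketch (illustrative): userspace can request the xattr mapping
 * with the FIEMAP ioctl, e.g.
 *
 *	struct fiemap fm = { .fm_flags = FIEMAP_FLAG_XATTR,
 *			     .fm_length = FIEMAP_MAX_OFFSET };
 *	ioctl(fd, FS_IOC_FIEMAP, &fm);
 *
 * For in-inode xattrs the single extent returned points into the inode
 * table (hence FIEMAP_EXTENT_DATA_INLINE); otherwise it covers the one
 * external xattr block at i_file_acl.
 */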
/*
 * ext4_ext_punch_hole
 *
 * Punches a hole of "length" bytes in a file starting
 * at byte "offset"
 *
 * @file:   The file to punch a hole in
 * @offset: The starting byte offset of the hole
 * @length: The length of the hole
 *
 * Returns 0 on success or a negative error code on failure.
 */
int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t first_block, stop_block;
	struct address_space *mapping = inode->i_mapping;
	handle_t *handle;
	loff_t first_page, last_page, page_len;
	loff_t first_page_offset, last_page_offset;
	int credits, err = 0;

	/*
	 * Write out all dirty pages to avoid race conditions,
	 * then release them.
	 */
	if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
		err = filemap_write_and_wait_range(mapping,
			offset, offset + length - 1);
		if (err)
			return err;
	}

	mutex_lock(&inode->i_mutex);
	/* It's not possible to punch a hole in an append-only file */
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
		err = -EPERM;
		goto out_mutex;
	}
	if (IS_SWAPFILE(inode)) {
		err = -ETXTBSY;
		goto out_mutex;
	}

	/* No need to punch hole beyond i_size */
	if (offset >= inode->i_size)
		goto out_mutex;

	/*
	 * If the hole extends beyond i_size, set the hole
	 * to end after the page that contains i_size
	 */
	if (offset + length > inode->i_size) {
		length = inode->i_size +
		    PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
		    offset;
	}

	first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	last_page = (offset + length) >> PAGE_CACHE_SHIFT;

	first_page_offset = first_page << PAGE_CACHE_SHIFT;
	last_page_offset = last_page << PAGE_CACHE_SHIFT;

	/* Now release the pages */
	if (last_page_offset > first_page_offset) {
		truncate_pagecache_range(inode, first_page_offset,
					 last_page_offset - 1);
	}

	/* Wait for all existing dio workers; newcomers will block on i_mutex */
	ext4_inode_block_unlocked_dio(inode);
	err = ext4_flush_unwritten_io(inode);
	if (err)
		goto out_dio;
	inode_dio_wait(inode);

	credits = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto out_dio;
	}

	/*
	 * Now we need to zero out the non-page-aligned data in the
	 * pages at the start and tail of the hole, and unmap the buffer
	 * heads for the block aligned regions of the page that were
	 * completely zeroed.
	 */
	if (first_page > last_page) {
		/*
		 * If the file space being truncated is contained within a page
		 * just zero out and unmap the middle of that page
		 */
		err = ext4_discard_partial_page_buffers(handle,
			mapping, offset, length, 0);
		if (err)
			goto out;
	} else {
		/*
		 * zero out and unmap the partial page that contains
		 * the start of the hole
		 */
		page_len = first_page_offset - offset;
		if (page_len > 0) {
			err = ext4_discard_partial_page_buffers(handle, mapping,
						offset, page_len, 0);
			if (err)
				goto out;
		}

		/*
		 * zero out and unmap the partial page that contains
		 * the end of the hole
		 */
		page_len = offset + length - last_page_offset;
		if (page_len > 0) {
			err = ext4_discard_partial_page_buffers(handle, mapping,
						last_page_offset, page_len, 0);
			if (err)
				goto out;
		}
	}

	/*
	 * If i_size is contained in the last page, we need to
	 * unmap and zero the partial page after i_size
	 */
	if (inode->i_size >> PAGE_CACHE_SHIFT == last_page &&
	    inode->i_size % PAGE_CACHE_SIZE != 0) {
		page_len = PAGE_CACHE_SIZE -
			(inode->i_size & (PAGE_CACHE_SIZE - 1));

		if (page_len > 0) {
			err = ext4_discard_partial_page_buffers(handle,
				mapping, inode->i_size, page_len, 0);
			if (err)
				goto out;
		}
	}

	first_block = (offset + sb->s_blocksize - 1) >>
		EXT4_BLOCK_SIZE_BITS(sb);
	stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);

	/* If there are no blocks to remove, return now */
	if (first_block >= stop_block)
		goto out;

	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_ext_invalidate_cache(inode);
	ext4_discard_preallocations(inode);

	err = ext4_es_remove_extent(inode, first_block,
				    stop_block - first_block);
	err = ext4_ext_remove_space(inode, first_block, stop_block - 1);

	ext4_ext_invalidate_cache(inode);
	ext4_discard_preallocations(inode);

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	up_write(&EXT4_I(inode)->i_data_sem);

out:
	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
out_dio:
	ext4_inode_resume_unlocked_dio(inode);
out_mutex:
	mutex_unlock(&inode->i_mutex);
	return err;
}
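
/*
 * Usage sketch (illustrative): this path services
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, length);
 *
 * Only whole filesystem blocks inside the range are deallocated; the
 * partial pages at either end are zeroed in place via
 * ext4_discard_partial_page_buffers() rather than freed.
 */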
int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	ext4_lblk_t start_blk;
	int error = 0;

	if (ext4_has_inline_data(inode)) {
		int has_inline = 1;

		error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline);
		if (has_inline)
			return error;
	}

	/* fallback to generic here if not in extents fmt */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return generic_block_fiemap(inode, fieinfo, start, len,
					    ext4_get_block);

	if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
		return -EBADR;

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		error = ext4_xattr_fiemap(inode, fieinfo);
	} else {
		ext4_lblk_t len_blks;
		__u64 last_blk;

		start_blk = start >> inode->i_sb->s_blocksize_bits;
		last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
		if (last_blk >= EXT_MAX_BLOCKS)
			last_blk = EXT_MAX_BLOCKS-1;
		len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;

		/*
		 * Walk the extent tree gathering extent information
		 * and pushing extents back to the user.
		 */
		error = ext4_fill_fiemap_extents(inode, start_blk,