/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
#include "xfs_dinode.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because
 * the bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	return (XFS_IS_REALTIME_INODE(ip) ?
		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) :
		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}
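
/*
 * Note on the distinction above: realtime file blocks map linearly onto
 * the realtime device, so the conversion is a plain units change from
 * filesystem blocks to 512-byte basic blocks.  Data device block numbers
 * instead encode an allocation group number plus an offset within that
 * AG, which XFS_FSB_TO_DADDR decodes before producing a disk address.
 */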

/*
 * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
 * callers.  Frees all the extents that need freeing, which must be done
 * last due to locking considerations.  We never free any extents in
 * the first transaction.
 *
 * Returns 1 in the committed parameter if the given transaction was
 * committed and a new one started, and 0 otherwise.
 */
int						/* error */
xfs_bmap_finish(
	xfs_trans_t		**tp,		/* transaction pointer addr */
	xfs_bmap_free_t		*flist,		/* i/o: list extents to free */
	int			*committed)	/* xact committed or not */
{
	xfs_efd_log_item_t	*efd;		/* extent free data */
	xfs_efi_log_item_t	*efi;		/* extent free intention */
	int			error;		/* error return value */
	xfs_bmap_free_item_t	*free;		/* free extent item */
	struct xfs_trans_res	tres;		/* new log reservation */
	xfs_mount_t		*mp;		/* filesystem mount structure */
	xfs_bmap_free_item_t	*next;		/* next item on free list */
	xfs_trans_t		*ntp;		/* new transaction pointer */

	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
	if (flist->xbf_count == 0) {
		*committed = 0;
		return 0;
	}
	ntp = *tp;
	efi = xfs_trans_get_efi(ntp, flist->xbf_count);
	for (free = flist->xbf_first; free; free = free->xbfi_next)
		xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock,
			free->xbfi_blockcount);

	tres.tr_logres = ntp->t_log_res;
	tres.tr_logcount = ntp->t_log_count;
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	ntp = xfs_trans_dup(*tp);
	error = xfs_trans_commit(*tp, 0);
	*tp = ntp;
	*committed = 1;
	/*
	 * We have a new transaction, so we should return committed=1,
	 * even though we're returning an error.
	 */
	if (error)
		return error;

	/*
	 * The transaction commit worked OK, so we can drop the extra ticket
	 * reference that we gained in xfs_trans_dup().
	 */
	xfs_log_ticket_put(ntp->t_ticket);

	error = xfs_trans_reserve(ntp, &tres, 0, 0);
	if (error)
		return error;
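
	/*
	 * The dup/commit/reserve sequence above is the standard pattern for
	 * rolling a permanent transaction: xfs_trans_dup() creates a
	 * follow-on transaction sharing the log ticket, the commit writes
	 * the EFI and frees the old transaction, and xfs_trans_reserve()
	 * regrants log space for the new transaction from the reservation
	 * values saved in tres.
	 */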
	efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count);
	for (free = flist->xbf_first; free != NULL; free = next) {
		next = free->xbfi_next;
		if ((error = xfs_free_extent(ntp, free->xbfi_startblock,
				free->xbfi_blockcount))) {
			/*
			 * The bmap free list will be cleaned up at a
			 * higher level.  The EFI will be canceled when
			 * this transaction is aborted.
			 * Need to force shutdown here to make sure it
			 * happens, since this transaction may not be
			 * dirty yet.
			 */
			mp = ntp->t_mountp;
			if (!XFS_FORCED_SHUTDOWN(mp))
				xfs_force_shutdown(mp,
						   (error == -EFSCORRUPTED) ?
						   SHUTDOWN_CORRUPT_INCORE :
						   SHUTDOWN_META_IO_ERROR);
			return error;
		}
		xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock,
			free->xbfi_blockcount);
		xfs_bmap_del_free(flist, NULL, free);
	}
	return 0;
}

int
xfs_bmap_rtalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	xfs_alloctype_t		atype = 0;	/* type for allocation routines */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* mount point structure */
	xfs_extlen_t		prod = 0;	/* product factor for allocators */
	xfs_extlen_t		ralen = 0;	/* realtime allocation length */
	xfs_extlen_t		align;		/* minimum allocation alignment */
	xfs_rtblock_t		rtb;

	mp = ap->ip->i_mount;
	align = xfs_get_extsz_hint(ap->ip);
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod; it will just get us in trouble.
	 */
	if (do_mod(ap->offset, align) || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 */
	ralen = ap->length / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to MAXEXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * MAXEXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
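	/*
	 * For example (illustrative numbers only): with a realtime extent
	 * size of 4 blocks, a request rounded up to MAXEXTLEN (2^21 - 1
	 * blocks) is cut back here to MAXEXTLEN / 4 rtextents, keeping the
	 * final extent length within the 21-bit on-disk limit.
	 */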
	/*
	 * Lock out other modifications to the RT bitmap inode.
	 */
	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		xfs_rtblock_t uninitialized_var(rtx);	/* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);

	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	atype = ap->blkno == 0 ? XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
	do_div(ap->blkno, mp->m_sb.sb_rextsize);
	rtb = ap->blkno;
	ap->length = ralen;
	if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
				&ralen, atype, ap->wasdel, prod, &rtb)))
		return error;
	if (rtb == NULLFSBLOCK && prod > 1 &&
	    (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
					   ap->length, &ralen, atype,
					   ap->wasdel, 1, &rtb)))
		return error;
	ap->blkno = rtb;
	if (ap->blkno != NULLFSBLOCK) {
		ap->blkno *= mp->m_sb.sb_rextsize;
		ralen *= mp->m_sb.sb_rextsize;
		ap->length = ralen;
		ap->ip->i_d.di_nblocks += ralen;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ralen;
		/*
		 * Adjust the disk quota also.  This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
	} else {
		ap->length = 0;
	}
	return 0;
}

/*
 * Check if the endoff is outside the last extent.  If so the caller will grow
 * the allocation to a stripe unit boundary.  All offsets are considered
 * outside the end of file for an empty fork, so 1 is returned in *eof in that
 * case.
 */
int
xfs_bmap_eof(
	struct xfs_inode	*ip,
	xfs_fileoff_t		endoff,
	int			whichfork,
	int			*eof)
{
	struct xfs_bmbt_irec	rec;
	int			error;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
	if (error || *eof)
		return error;

	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.
 */
STATIC void
xfs_bmap_count_leaves(
	xfs_ifork_t		*ifp,
	xfs_extnum_t		idx,
	int			numrecs,
	int			*count)
{
	int		b;

	for (b = 0; b < numrecs; b++) {
		xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
		*count += xfs_bmbt_get_blockcount(frp);
	}
}

/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	int			numrecs,
	int			*count)
{
	int		b;
	xfs_bmbt_rec_t	*frp;

	for (b = 1; b <= numrecs; b++) {
		frp = XFS_BMBT_REC_ADDR(mp, block, b);
		*count += xfs_bmbt_disk_get_blockcount(frp);
	}
}

/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
 */
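/*
 * At each interior level the leftmost block and all of its right siblings
 * are counted before recursing down the leftmost child pointer; at level 1
 * the sibling chain is walked and the leaf records of each block are
 * summed into *count.
 */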
STATIC int					/* error */
xfs_bmap_count_tree(
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fsblock_t	blockno,	/* file system block number */
	int		levelin,	/* level in btree */
	int		*count)		/* Count of blocks */
{
	int			error;
	xfs_buf_t		*bp, *nbp;
	int			level = levelin;
	__be64			*pp;
	xfs_fsblock_t		bno = blockno;
	xfs_fsblock_t		nextbno;
	struct xfs_btree_block	*block, *nextblock;
	int			numrecs;

	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
	if (error)
		return error;
	*count += 1;
	block = XFS_BUF_TO_BLOCK(bp);

	if (--level) {
		/* Not at node above leaves, count this level of nodes */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		while (nextbno != NULLFSBLOCK) {
			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			nextblock = XFS_BUF_TO_BLOCK(nbp);
			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
			xfs_trans_brelse(tp, nbp);
		}

		/* Dive to the next level */
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (unlikely((error =
		     xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
			xfs_trans_brelse(tp, bp);
			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
					 XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}
		xfs_trans_brelse(tp, bp);
	} else {
		/* count all level 1 nodes and their leaves */
		for (;;) {
			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
			numrecs = be16_to_cpu(block->bb_numrecs);
			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
			xfs_trans_brelse(tp, bp);
			if (nextbno == NULLFSBLOCK)
				break;
			bno = nextbno;
			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			block = XFS_BUF_TO_BLOCK(bp);
		}
	}
	return 0;
}

/*
 * Count fsblocks of the given fork.
 */
int						/* error */
xfs_bmap_count_blocks(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode */
	int			whichfork,	/* data or attr fork */
	int			*count)		/* out: count of blocks */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS) {
		xfs_bmap_count_leaves(ifp, 0,
			ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
			count);
		return 0;
	}

	/*
	 * Root level must use XFS_BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	block = ifp->if_broot;
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);
	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
		XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
				 mp);
		return -EFSCORRUPTED;
	}

	return 0;
}

/*
 * Returns 1 for success, 0 if we failed to map the extent.
 */
STATIC int
xfs_getbmapx_fix_eof_hole(
	xfs_inode_t		*ip,		/* xfs incore inode pointer */
	struct getbmapx		*out,		/* output structure */
	int			prealloced,	/* this is a file with
						 * preallocated data space */
	__int64_t		end,		/* last block requested */
	xfs_fsblock_t		startblock)
{
	__int64_t		fixlen;
	xfs_mount_t		*mp;		/* file system mount point */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_extnum_t		lastx;		/* last extent pointer */
	xfs_fileoff_t		fileblock;

	if (startblock == HOLESTARTBLOCK) {
		mp = ip->i_mount;
		out->bmv_block = -1;
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		fixlen -= out->bmv_offset;
		if (prealloced && out->bmv_offset + out->bmv_length == end) {
			/* Came to hole at EOF.  Trim it. */
			if (fixlen <= 0)
				return 0;
			out->bmv_length = fixlen;
		}
	} else {
		if (startblock == DELAYSTARTBLOCK)
			out->bmv_block = -2;
		else
			out->bmv_block = xfs_fsb_to_db(ip, startblock);
		fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
		ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
		if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
		    (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
			out->bmv_oflags |= BMV_OF_LAST;
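
		/*
		 * The lookup above finds the extent backing "fileblock";
		 * when its index is the final record in the in-core extent
		 * list, this mapping is the last one in the file and is
		 * flagged BMV_OF_LAST for the caller.
		 */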
	}

	return 1;
}

/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int						/* error code */
xfs_getbmap(
	xfs_inode_t		*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	xfs_bmap_format_t	formatter,	/* format to user */
	void			*arg)		/* formatter arg */
{
	__int64_t		bmvend;		/* last block requested */
	int			error = 0;	/* return value */
	__int64_t		fixlen;		/* length for -1 case */
	int			i;		/* extent number */
	int			lock;		/* lock state */
	xfs_bmbt_irec_t		*map;		/* buffer for user's data */
	xfs_mount_t		*mp;		/* file system mount point */
	int			nex;		/* # of user extents can do */
	int			nexleft;	/* # of user extents left */
	int			subnex;		/* # of bmapi's can do */
	int			nmap;		/* number of map entries */
	struct getbmapx		*out;		/* output structure */
	int			whichfork;	/* data or attr fork */
	int			prealloced;	/* this is a file with
						 * preallocated data space */
	int			iflags;		/* interface flags */
	int			bmapi_flags;	/* flags for xfs_bmapi */
	int			cur_ext = 0;

	mp = ip->i_mount;
	iflags = bmv->bmv_iflags;
	whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;

	if (whichfork == XFS_ATTR_FORK) {
		if (XFS_IFORK_Q(ip)) {
			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
				return -EINVAL;
		} else if (unlikely(
			   ip->i_d.di_aformat != 0 &&
			   ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
			XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return -EFSCORRUPTED;
		}

		prealloced = 0;
		fixlen = 1LL << 32;
	} else {
		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
		    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
			return -EINVAL;

		if (xfs_get_extsz_hint(ip) ||
		    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)) {
			prealloced = 1;
			fixlen = mp->m_super->s_maxbytes;
		} else {
			prealloced = 0;
			fixlen = XFS_ISIZE(ip);
		}
	}

	if (bmv->bmv_length == -1) {
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
		bmv->bmv_length =
			max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
	} else if (bmv->bmv_length == 0) {
		bmv->bmv_entries = 0;
		return 0;
	} else if (bmv->bmv_length < 0) {
		return -EINVAL;
	}

	nex = bmv->bmv_count - 1;
	if (nex <= 0)
		return -EINVAL;
	bmvend = bmv->bmv_offset + bmv->bmv_length;

	if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
		return -ENOMEM;
	out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
	if (!out)
		return -ENOMEM;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	if (whichfork == XFS_DATA_FORK) {
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation.  These are not removed
			 * until the release function is called or the inode
			 * is inactivated.  Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		lock = xfs_ilock_data_map_shared(ip);
	} else {
		lock = xfs_ilock_attr_map_shared(ip);
	}

	/*
	 * Don't let nex be bigger than the number of extents
	 * we can have assuming alternating holes and real extents.
	 */
	if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
		nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
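
	/*
	 * Worked example: a fork with 2 extents can map to at most 5
	 * getbmapx records when holes and extents alternate (hole, extent,
	 * hole, extent, hole), hence the 2 * nextents + 1 clamp above.
	 */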

	bmapi_flags = xfs_bmapi_aflag(whichfork);
	if (!(iflags & BMV_IF_PREALLOC))
		bmapi_flags |= XFS_BMAPI_IGSTATE;
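
	/*
	 * XFS_BMAPI_IGSTATE tells the mapping code to ignore the
	 * written/unwritten state of extents, so contiguous written and
	 * unwritten ranges can be reported as a single mapping when the
	 * caller has not asked for preallocation details.
	 */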

	/*
	 * Allocate enough space to handle "subnex" maps at a time.
	 */
	error = -ENOMEM;
	subnex = 16;
	map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
	if (!map)
		goto out_unlock_ilock;

	bmv->bmv_entries = 0;

	if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
	    (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
		error = 0;
		goto out_free_map;
	}

	nexleft = nex;

	do {
		nmap = (nexleft > subnex) ? subnex : nexleft;
		error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
				       XFS_BB_TO_FSB(mp, bmv->bmv_length),
				       map, &nmap, bmapi_flags);
		if (error)
			goto out_free_map;
		ASSERT(nmap <= subnex);

		for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
			out[cur_ext].bmv_oflags = 0;
			if (map[i].br_state == XFS_EXT_UNWRITTEN)
				out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
			else if (map[i].br_startblock == DELAYSTARTBLOCK)
				out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
			out[cur_ext].bmv_offset =
				XFS_FSB_TO_BB(mp, map[i].br_startoff);
			out[cur_ext].bmv_length =
				XFS_FSB_TO_BB(mp, map[i].br_blockcount);
			out[cur_ext].bmv_unused1 = 0;
			out[cur_ext].bmv_unused2 = 0;

			/*
			 * Delayed allocation extents that start beyond EOF can
			 * occur due to speculative EOF allocation when the
			 * delalloc extent is larger than the largest freespace
			 * extent at conversion time.  These extents cannot be
			 * converted by data writeback, so can exist here even
			 * if we are not supposed to be finding delalloc
			 * extents.
			 */
			if (map[i].br_startblock == DELAYSTARTBLOCK &&
			    map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
				ASSERT((iflags & BMV_IF_DELALLOC) != 0);

			if (map[i].br_startblock == HOLESTARTBLOCK &&
			    whichfork == XFS_ATTR_FORK) {
				/* came to the end of attribute fork */
				out[cur_ext].bmv_oflags |= BMV_OF_LAST;
				goto out_free_map;
			}

			if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
					prealloced, bmvend,
					map[i].br_startblock))
				goto out_free_map;

			bmv->bmv_offset =
				out[cur_ext].bmv_offset +
				out[cur_ext].bmv_length;
			bmv->bmv_length =
				max_t(__int64_t, 0, bmvend - bmv->bmv_offset);

			/*
			 * In case we don't want to return the hole,
			 * don't increase cur_ext so that we can reuse
			 * it in the next loop.
			 */
			if ((iflags & BMV_IF_NO_HOLES) &&
			    map[i].br_startblock == HOLESTARTBLOCK) {
				memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
				continue;
			}

			nexleft--;
			bmv->bmv_entries++;
			cur_ext++;
		}
	} while (nmap && nexleft && bmv->bmv_length);

 out_free_map:
	kmem_free(map);
 out_unlock_ilock:
	xfs_iunlock(ip, lock);
 out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	for (i = 0; i < cur_ext; i++) {
		int full = 0;	/* user array is full */

		/* format results & advance arg */
		error = formatter(&arg, &out[i], &full);
		if (error || full)
			break;
	}

	kmem_free(out);
	return error;
}

/*
 * Dead simple method of punching delayed allocation blocks from a range in
 * the inode.  Walks a block at a time so will be slow, but is only executed in
 * rare error cases so the overhead is not critical.  This will always punch
 * out both the start and end blocks, even if the ranges only partially overlap
 * them, so it is up to the caller to ensure that partial blocks are not
 * passed in.
 */
int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		length)
{
	xfs_fileoff_t		remaining = length;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	do {
		int		done;
		xfs_bmbt_irec_t	imap;
		int		nimaps = 1;
		xfs_fsblock_t	firstblock;
		xfs_bmap_free_t flist;

		/*
		 * Map the range first and check that it is a delalloc extent
		 * before trying to unmap the range.  Otherwise we will be
		 * trying to remove a real extent (which requires a
		 * transaction) or a hole, which is probably a bad idea...
		 */
		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
				       XFS_BMAPI_ENTIRE);

		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"Failed delalloc mapping lookup ino %lld fsb %lld.",
						ip->i_ino, start_fsb);
			}
			break;
		}
		if (!nimaps) {
			/* nothing there */
			goto next_block;
		}
		if (imap.br_startblock != DELAYSTARTBLOCK) {
			/* been converted, ignore */
			goto next_block;
		}
		WARN_ON(imap.br_blockcount == 0);

		/*
		 * Note: while we initialise the firstblock/flist pair, they
		 * should never be used because blocks should never be
		 * allocated or freed for a delalloc extent and hence we don't
		 * need to cancel or finish them after the xfs_bunmapi() call.
		 */
		xfs_bmap_init(&flist, &firstblock);
		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
					&flist, &done);
		if (error)
			break;

		ASSERT(!flist.xbf_count && !flist.xbf_first);
next_block:
		start_fsb++;
		remaining--;
	} while (remaining > 0);

	return error;
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks.  The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(ip->i_d.di_mode))
		return false;

	/*
	 * Zero-sized files with no cached pages and no delalloc blocks will
	 * not have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VFS_I(ip)->i_mapping->nrpages == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
}

/*
 * This is called by xfs_inactive to free any blocks beyond eof
 * when the link count isn't zero and by xfs_dm_punch_hole() when
 * punching a hole to EOF.
 */
int
xfs_free_eofblocks(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	bool		need_iolock)
{
	xfs_trans_t	*tp;
	int		error;
	xfs_fileoff_t	end_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	map_len;
	int		nimaps;
	xfs_bmbt_irec_t	imap;

	/*
	 * Figure out if there are any blocks beyond the end
	 * of the file.  If not, then there is nothing to do.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return 0;
	map_len = last_fsb - end_fsb;

	nimaps = 1;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!error && (nimaps != 0) &&
	    (imap.br_startblock != HOLESTARTBLOCK ||
	     ip->i_delayed_blks)) {
		/*
		 * Attach the dquots to the inode up front.
		 */
		error = xfs_qm_dqattach(ip, 0);
		if (error)
			return error;

		/*
		 * There are blocks after the end of file.
		 * Free them up now by truncating the file to
		 * its current size.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);

		if (need_iolock) {
			if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
				xfs_trans_cancel(tp, 0);
				return -EAGAIN;
			}
		}

		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
		if (error) {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp, 0);
			if (need_iolock)
				xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Do not update the on-disk file size.  If we update the
		 * on-disk file size and then the system crashes before the
		 * contents of the file are flushed to disk then the files
		 * may be full of holes (ie NULL files bug).
		 */
		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
					      XFS_ISIZE(ip));
		if (error) {
			/*
			 * If we get an error at this point we simply don't
			 * bother truncating the file.
			 */
			xfs_trans_cancel(tp,
					 (XFS_TRANS_RELEASE_LOG_RES |
					  XFS_TRANS_ABORT));
		} else {
			error = xfs_trans_commit(tp,
						XFS_TRANS_RELEASE_LOG_RES);
			if (!error)
				xfs_inode_clear_eofblocks_tag(ip);
		}

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (need_iolock)
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	}
	return error;
}

int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	int			alloc_type)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocated_fsb;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fsblock_t		firstfsb;
	int			nimaps;
	int			quota_flag;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	xfs_bmap_free_t		free_list;
	uint			qblocks, resblks, resrtextents;
	int			committed;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	if (len <= 0)
		return -EINVAL;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	nimaps = 1;
	startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
	allocatesize_fsb = XFS_B_TO_FSB(mp, count);

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			if ((temp = do_mod(startoffset_fsb, extsz)))
				e += temp;
			if ((temp = do_mod(e, extsz)))
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow.  We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
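
		/*
		 * Worked example (illustrative only): with nimaps == 1, a
		 * single loop iteration reserves at most MAXEXTLEN
		 * (2^21 - 1) blocks, so a multi-gigabyte request is split
		 * across several transactions instead of overflowing the
		 * 32-bit reservation count.
		 */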
		if (unlikely(rt)) {
			resrtextents = qblocks = resblks;
			resrtextents /= mp->m_sb.sb_rextsize;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			quota_flag = XFS_QMOPT_RES_RTBLKS;
		} else {
			resrtextents = 0;
			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			quota_flag = XFS_QMOPT_RES_REGBLKS;
		}

		/*
		 * Allocate and setup the transaction.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
					  resblks, resrtextents);
		/*
		 * Check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp, 0);
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
						      0, quota_flag);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		xfs_bmap_init(&free_list, &firstfsb);
		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
					allocatesize_fsb, alloc_type, &firstfsb,
					0, imapp, &nimaps, &free_list);
		if (error)
			goto error0;

		/*
		 * Complete the transaction
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto error0;

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			break;

		allocated_fsb = imapp->br_blockcount;

		if (nimaps == 0) {
			error = -ENOSPC;
			break;
		}

		startoffset_fsb += allocated_fsb;
		allocatesize_fsb -= allocated_fsb;
	}

	return error;

error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
	xfs_bmap_cancel(&free_list);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Zero file bytes between startoff and endoff inclusive.
 * The iolock is held exclusive and no blocks are buffered.
 *
 * This function is used by xfs_free_file_space() to zero
 * partial blocks when the range to free is not block aligned.
 * When unreserving space with boundaries that are not block
 * aligned we round up the start and round down the end
 * boundaries and then use this function to zero the parts of
 * the blocks that got dropped during the rounding.
 */
STATIC int
xfs_zero_remaining_bytes(
	xfs_inode_t		*ip,
	xfs_off_t		startoff,
	xfs_off_t		endoff)
{
	xfs_bmbt_irec_t		imap;
	xfs_fileoff_t		offset_fsb;
	xfs_off_t		lastoffset;
	xfs_off_t		offset;
	xfs_buf_t		*bp;
	xfs_mount_t		*mp = ip->i_mount;
	int			nimap;
	int			error = 0;

	/*
	 * Avoid doing I/O beyond eof - it's not necessary
	 * since nothing can read beyond eof.  The space will
	 * be zeroed when the file is extended anyway.
	 */
	if (startoff >= XFS_ISIZE(ip))
		return 0;

	if (endoff > XFS_ISIZE(ip))
		endoff = XFS_ISIZE(ip);

	for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
		uint lock_mode;

		offset_fsb = XFS_B_TO_FSBT(mp, offset);
		nimap = 1;

		lock_mode = xfs_ilock_data_map_shared(ip);
		error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
		xfs_iunlock(ip, lock_mode);

		if (error || nimap < 1)
			break;
		ASSERT(imap.br_blockcount >= 1);
		ASSERT(imap.br_startoff == offset_fsb);
		lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
		if (lastoffset > endoff)
			lastoffset = endoff;
		if (imap.br_startblock == HOLESTARTBLOCK)
			continue;
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		if (imap.br_state == XFS_EXT_UNWRITTEN)
			continue;

		error = xfs_buf_read_uncached(XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp,
				xfs_fsb_to_db(ip, imap.br_startblock),
				BTOBB(mp->m_sb.sb_blocksize),
				0, &bp, NULL);
		if (error)
			return error;

		memset(bp->b_addr +
				(offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
		       0, lastoffset - offset + 1);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			return error;
	}
	return error;
}

int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	int			committed;
	int			done;
	xfs_fileoff_t		endoffset_fsb;
	int			error;
	xfs_fsblock_t		firstfsb;
	xfs_bmap_free_t		free_list;
	xfs_bmbt_irec_t		imap;
	xfs_off_t		ioffset;
	xfs_off_t		iendoffset;
	xfs_extlen_t		mod = 0;
	xfs_mount_t		*mp;
	int			nimap;
	uint			resblks;
	xfs_off_t		rounding;
	int			rt;
	xfs_fileoff_t		startoffset_fsb;
	xfs_trans_t		*tp;

	mp = ip->i_mount;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	error = 0;
	if (len <= 0)	/* if nothing being freed */
		return error;
	rt = XFS_IS_REALTIME_INODE(ip);
	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/* wait for the completion of any pending DIOs */
	inode_dio_wait(VFS_I(ip));

	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
	ioffset = round_down(offset, rounding);
	iendoffset = round_up(offset + len, rounding) - 1;
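
	/*
	 * Example (assuming 4k blocks and 4k pages): punching bytes 5000
	 * through 11999 widens here to the page-aligned byte range 4096
	 * through 12287 for writeback and page cache invalidation, while
	 * the block-level work below still honours the exact byte range.
	 */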
	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ioffset,
					     iendoffset);
	if (error)
		goto out;
	truncate_pagecache_range(VFS_I(ip), ioffset, iendoffset);

	/*
	 * Need to zero the stuff we're not freeing, on disk.
	 * If it's a realtime file & can't use unwritten extents then we
	 * actually need to zero the extent edges.  Otherwise xfs_bunmapi
	 * will take care of it for us.
	 */
	if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
		nimap = 1;
		error = xfs_bmapi_read(ip, startoffset_fsb, 1,
					&imap, &nimap, 0);
		if (error)
			goto out;
		ASSERT(nimap == 0 || nimap == 1);
		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
			xfs_daddr_t	block;

			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
			block = imap.br_startblock;
			mod = do_div(block, mp->m_sb.sb_rextsize);
			if (mod)
				startoffset_fsb += mp->m_sb.sb_rextsize - mod;
		}
		nimap = 1;
		error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
					&imap, &nimap, 0);
		if (error)
			goto out;
		ASSERT(nimap == 0 || nimap == 1);
		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
			mod++;
			if (mod && (mod != mp->m_sb.sb_rextsize))
				endoffset_fsb -= mod;
		}
	}

	if ((done = (endoffset_fsb <= startoffset_fsb)))
		/*
		 * One contiguous piece to clear
		 */
		error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1);
	else {
		/*
		 * Some full blocks, possibly two pieces to clear
		 */
		if (offset < XFS_FSB_TO_B(mp, startoffset_fsb))
			error = xfs_zero_remaining_bytes(ip, offset,
				XFS_FSB_TO_B(mp, startoffset_fsb) - 1);
		if (!error &&
		    XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len)
			error = xfs_zero_remaining_bytes(ip,
				XFS_FSB_TO_B(mp, endoffset_fsb),
				offset + len - 1);
	}

	/*
	 * Free file space until done or until there is an error
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	while (!error && !done) {
		/*
		 * Allocate and setup the transaction.  Allow this
		 * transaction to dip into the reserve blocks to ensure
		 * the freeing of the space succeeds at ENOSPC.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write, resblks, 0);

		/*
		 * Check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp, 0);
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota(tp, mp,
				ip->i_udquot, ip->i_gdquot, ip->i_pdquot,
				resblks, 0, XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Issue the bunmapi() call to free the blocks
		 */
		xfs_bmap_init(&free_list, &firstfsb);
		error = xfs_bunmapi(tp, ip, startoffset_fsb,
				  endoffset_fsb - startoffset_fsb,
				  0, 2, &firstfsb, &free_list, &done);
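
		/*
		 * The "2" above is the nexts argument: each transaction
		 * unmaps at most two extents before committing, so an
		 * arbitrarily fragmented range is freed across multiple
		 * transactions by this loop until xfs_bunmapi() sets done.
		 */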
		if (error)
			goto error0;

		/*
		 * Complete the transaction
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto error0;

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

 out:
	return error;

 error0:
	xfs_bmap_cancel(&free_list);
 error1:
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	goto out;
}

int
xfs_zero_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			granularity;
	xfs_off_t		start_boundary;
	xfs_off_t		end_boundary;
	int			error;

	trace_xfs_zero_file_space(ip);

	granularity = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);

	/*
	 * Round the range of extents we are going to convert inwards.  If the
	 * offset is aligned, then it doesn't get changed so we zero from the
	 * start of the block offset points to.
	 */
	start_boundary = round_up(offset, granularity);
	end_boundary = round_down(offset + len, granularity);

	ASSERT(start_boundary >= offset);
	ASSERT(end_boundary <= offset + len);
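
	/*
	 * For instance, with 4k granularity a request to zero bytes 5000
	 * through 19999 converts only the wholly covered blocks from 8192
	 * through 16383; the sub-block edges on either side are zeroed by
	 * the xfs_iozero() calls further down.
	 */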

	if (start_boundary < end_boundary - 1) {
		/*
		 * Writeback the range to ensure any inode size updates due to
		 * appending writes make it to disk (otherwise we could just
		 * punch out the delalloc blocks).
		 */
		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
				start_boundary, end_boundary - 1);
		if (error)
			goto out;
		truncate_pagecache_range(VFS_I(ip), start_boundary,
					 end_boundary - 1);

		/* convert the blocks */
		error = xfs_alloc_file_space(ip, start_boundary,
					end_boundary - start_boundary - 1,
					XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT);
		if (error)
			goto out;

		/* We've handled the interior of the range, now for the edges */
		if (start_boundary != offset) {
			error = xfs_iozero(ip, offset, start_boundary - offset);
			if (error)
				goto out;
		}

		if (end_boundary != offset + len)
			error = xfs_iozero(ip, end_boundary,
					   offset + len - end_boundary);
	} else {
		/*
		 * It's either a sub-granularity range, or the range spans
		 * only parts of two adjacent blocks.
		 */
		error = xfs_iozero(ip, offset, len);
	}

out:
	return error;
}

/*
 * xfs_collapse_file_space()
 *	This routine frees disk space and shifts extents for the given file.
 *	The first thing we do is free the data blocks in the specified range
 *	by calling xfs_free_file_space().  That also syncs dirty data and
 *	invalidates the page cache over the region on which the collapse
 *	range is working.  Extent records are then shifted to the left to
 *	cover the hole created by the free.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_collapse_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	int			done = 0;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	struct xfs_bmap_free	free_list;
	xfs_fsblock_t		first_block;
	int			committed;
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		next_fsb;
	xfs_fileoff_t		shift_fsb;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));

	trace_xfs_collapse_file_space(ip);

	next_fsb = XFS_B_TO_FSB(mp, offset + len);
	shift_fsb = XFS_B_TO_FSB(mp, len);
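
	/*
	 * e.g. collapsing 1MB at offset 4MB on a 4k-block filesystem:
	 * next_fsb points at the first block past the punched range
	 * (block 1280) and shift_fsb is 256 blocks, the distance every
	 * following extent moves left once the range has been freed below.
	 */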

	error = xfs_free_file_space(ip, offset, len);
	if (error)
		return error;

	/*
	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
	 * into the accessible region of the file.
	 */
	if (xfs_can_free_eofblocks(ip, true)) {
		error = xfs_free_eofblocks(mp, ip, false);
		if (error)
			return error;
	}

	/*
	 * Writeback and invalidate cache for the remainder of the file as we're
	 * about to shift down every extent from the collapse range to EOF.  The
	 * free of the collapse range above might have already done some of
	 * this, but we shouldn't rely on it to do anything outside of the range
	 * that was freed.
	 */
	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
					     offset, -1);
	if (error)
		return error;
	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
					(offset + len) >> PAGE_CACHE_SHIFT, -1);
	if (error)
		return error;

	while (!error && !done) {
		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
		/*
		 * We need to reserve blocks for the transaction because,
		 * after shifting an extent into a hole, we may find that
		 * adjacent extents can be merged, which can free a block
		 * during the record update.
		 */
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
					XFS_DIOSTRAT_SPACE_RES(mp, 0), 0);
		if (error) {
			xfs_trans_cancel(tp, 0);
			break;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
				ip->i_gdquot, ip->i_pdquot,
				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0,
				XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto out;

		xfs_trans_ijoin(tp, ip, 0);

		xfs_bmap_init(&free_list, &first_block);

		/*
		 * We are using the write transaction in which at most
		 * 2 bmbt updates are allowed.
		 */
		start_fsb = next_fsb;
		error = xfs_bmap_shift_extents(tp, ip, start_fsb, shift_fsb,
				&done, &next_fsb, &first_block, &free_list,
				XFS_BMAP_MAX_SHIFT_EXTENTS);
		if (error)
			goto out;

		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto out;

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

	return error;

out:
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap. This is not a problem with
 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 * data fork depending on the space the attribute fork is taking so we can get
 * invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format. Hence we can't just
 * blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt; basically we are putting the responsibility on
 * userspace to get this right.
 */
static int
xfs_swap_extents_check_format(
	xfs_inode_t	*ip,	/* target inode */
	xfs_inode_t	*tip)	/* tmp inode */
{
	/* Should never get a local format */
	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
		return -EINVAL;

	/*
	 * If the target inode has fewer extents than the temporary inode,
	 * why did userspace call us?
	 */
	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
		return -EINVAL;

	/*
	 * If the target inode is in extent form and the temp inode is in btree
	 * form then we will end up with the target inode in the wrong format
	 * as we already know there are fewer extents in the temp inode.
	 */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
		return -EINVAL;

	/* Check temp in extent form to max in target */
	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
		return -EINVAL;

	/* Check target in extent form to max in temp */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
		return -EINVAL;

	/*
	 * If we are in a btree format, check that the temp root block will fit
	 * in the target and that it has enough extents to be in btree format
	 * in the target.
	 *
	 * Note that we have to be careful to allow btree->extent conversions
	 * (a common defrag case) which will occur when the temp inode is in
	 * extent format...
	 */
	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_BOFF(ip) &&
		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
			return -EINVAL;
		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
			return -EINVAL;
	}

	/* Reciprocal target->temp btree format checks */
	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_BOFF(tip) &&
		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
			return -EINVAL;
		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
			return -EINVAL;
	}

	return 0;
}

static int
xfs_swap_extent_flush(
	struct xfs_inode	*ip)
{
	int	error;

	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
	if (error)
		return error;
	truncate_pagecache_range(VFS_I(ip), 0, -1);

	/* Verify O_DIRECT for ftmp */
	if (VFS_I(ip)->i_mapping->nrpages)
		return -EINVAL;

	/*
	 * Don't try to swap extents on mmap()d files because we can't lock
	 * out races against page faults safely.
	 */
	if (mapping_mapped(VFS_I(ip)->i_mapping))
		return -EBUSY;
	return 0;
}

int
xfs_swap_extents(
	xfs_inode_t	*ip,	/* target inode */
	xfs_inode_t	*tip,	/* tmp inode */
	xfs_swapext_t	*sxp)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_trans_t	*tp;
	xfs_bstat_t	*sbp = &sxp->sx_stat;
	xfs_ifork_t	*tempifp, *ifp, *tifp;
	int		src_log_flags, target_log_flags;
	int		error = 0;
	int		aforkblks = 0;
	int		taforkblks = 0;
	__uint64_t	tmp;
	int		lock_flags;

	tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
	if (!tempifp) {
		error = -ENOMEM;
		goto out;
	}

	/*
	 * Lock up the inodes against other IO and truncation to begin with.
	 * Then we can safely ensure the inodes are flushed and have no page
	 * cache.  Once we have done this we can take the ilocks and do the
	 * rest of the checks.
	 */
	lock_flags = XFS_IOLOCK_EXCL;
	xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);

	/* Verify that both files have the same format */
	if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
		error = -EINVAL;
		goto out_unlock;
	}

	/* Verify both files are either real-time or non-realtime */
	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
		error = -EINVAL;
		goto out_unlock;
	}

	error = xfs_swap_extent_flush(ip);
	if (error)
		goto out_unlock;
	error = xfs_swap_extent_flush(tip);
	if (error)
		goto out_unlock;

	tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		goto out_unlock;
	}
	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
	lock_flags |= XFS_ILOCK_EXCL;

	/* Verify all data are being swapped */
	if (sxp->sx_offset != 0 ||
	    sxp->sx_length != ip->i_d.di_size ||
	    sxp->sx_length != tip->i_d.di_size) {
		error = -EFAULT;
		goto out_trans_cancel;
	}

	trace_xfs_swap_extent_before(ip, 0);
	trace_xfs_swap_extent_before(tip, 1);

	/* check inode formats now that data is flushed */
	error = xfs_swap_extents_check_format(ip, tip);
	if (error) {
		xfs_notice(mp,
		    "%s: inode 0x%llx format is incompatible for exchanging.",
				__func__, ip->i_ino);
		goto out_trans_cancel;
	}

	/*
	 * Compare the current change & modify times with that
	 * passed in.  If they differ, we abort this swap.
	 * This is the mechanism used to assure the calling
	 * process that the file was not changed out from
	 * under it.
	 */
	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
		error = -EBUSY;
		goto out_trans_cancel;
	}

	/*
	 * Count the number of extended attribute blocks
	 */
	if ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0) &&
	    (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
		if (error)
			goto out_trans_cancel;
	}
	if ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0) &&
	    (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
			&taforkblks);
		if (error)
			goto out_trans_cancel;
	}

	xfs_trans_ijoin(tp, ip, lock_flags);
	xfs_trans_ijoin(tp, tip, lock_flags);

	/*
	 * Before we've swapped the forks, let's set the owners of the forks
	 * appropriately.  We have to do this as we are demand paging the btree
	 * buffers, and so the validation done on read will expect the owner
	 * field to be correctly set.  Once we change the owners, we can swap
	 * the inode forks.
	 *
	 * Note the trickiness in setting the log flags - we set the owner log
	 * flag on the opposite inode (i.e. the inode we are setting the new
	 * owner to be) because once we swap the forks and log that, log
	 * recovery is going to see the fork as owned by the swapped inode,
	 * not the pre-swapped inodes.
	 */
	src_log_flags = XFS_ILOG_CORE;
	target_log_flags = XFS_ILOG_CORE;

	if (ip->i_d.di_version == 3 &&
	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		target_log_flags |= XFS_ILOG_DOWNER;
		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
					      tip->i_ino, NULL);
		if (error)
			goto out_trans_cancel;
	}

	if (tip->i_d.di_version == 3 &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		src_log_flags |= XFS_ILOG_DOWNER;
		error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
					      ip->i_ino, NULL);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * Swap the data forks of the inodes
	 */
	ifp = &ip->i_df;
	tifp = &tip->i_df;
	*tempifp = *ifp;	/* struct copy */
	*ifp = *tifp;		/* struct copy */
	*tifp = *tempifp;	/* struct copy */
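
	/*
	 * The three struct copies above exchange the entire in-core data
	 * fork contents (extent lists, btree roots, flags) between the two
	 * inodes; nothing else in the inodes moves, which is why the
	 * on-disk counters are fixed up by hand below.
	 */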

	/*
	 * Fix the on-disk inode values
	 */
	tmp = (__uint64_t)ip->i_d.di_nblocks;
	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;

	tmp = (__uint64_t) ip->i_d.di_nextents;
	ip->i_d.di_nextents = tip->i_d.di_nextents;
	tip->i_d.di_nextents = tmp;

	tmp = (__uint64_t) ip->i_d.di_format;
	ip->i_d.di_format = tip->i_d.di_format;
	tip->i_d.di_format = tmp;

	/*
	 * The extents in the source inode could still contain speculative
	 * preallocation beyond EOF (e.g. the file is open but not modified
	 * while defrag is in progress). In that case, we need to copy over the
	 * number of delalloc blocks the data fork in the source inode is
	 * tracking beyond EOF so that when the fork is truncated away when the
	 * temporary inode is unlinked we don't underrun the i_delayed_blks
	 * counter on that inode.
	 */
	ASSERT(tip->i_delayed_blks == 0);
	tip->i_delayed_blks = ip->i_delayed_blks;
	ip->i_delayed_blks = 0;

	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/*
		 * If the extents fit in the inode, fix the pointer.
		 * Otherwise it's already NULL or pointing to the extent.
		 */
		if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
			ifp->if_u1.if_extents =
				ifp->if_u2.if_inline_ext;
		}
		src_log_flags |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		ASSERT(ip->i_d.di_version < 3 ||
		       (src_log_flags & XFS_ILOG_DOWNER));
		src_log_flags |= XFS_ILOG_DBROOT;
		break;
	}

	switch (tip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/*
		 * If the extents fit in the inode, fix the pointer.
		 * Otherwise it's already NULL or pointing to the extent.
		 */
		if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
			tifp->if_u1.if_extents =
				tifp->if_u2.if_inline_ext;
		}
		target_log_flags |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		target_log_flags |= XFS_ILOG_DBROOT;
		ASSERT(tip->i_d.di_version < 3 ||
		       (target_log_flags & XFS_ILOG_DOWNER));
		break;
	}

	xfs_trans_log_inode(tp, ip,  src_log_flags);
	xfs_trans_log_inode(tp, tip, target_log_flags);

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 */
	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp, 0);

	trace_xfs_swap_extent_after(ip, 0);
	trace_xfs_swap_extent_after(tip, 1);

out:
	kmem_free(tempifp);
	return error;

out_unlock:
	xfs_iunlock(ip, lock_flags);
	xfs_iunlock(tip, lock_flags);
	goto out;

out_trans_cancel:
	xfs_trans_cancel(tp, 0);
	goto out_unlock;
}