1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_dir2.h"
28 #include "xfs_da_btree.h"
29 #include "xfs_bmap_btree.h"
30 #include "xfs_alloc_btree.h"
31 #include "xfs_ialloc_btree.h"
32 #include "xfs_dir2_sf.h"
33 #include "xfs_attr_sf.h"
34 #include "xfs_dinode.h"
35 #include "xfs_inode.h"
36 #include "xfs_btree.h"
37 #include "xfs_dmapi.h"
38 #include "xfs_mount.h"
39 #include "xfs_ialloc.h"
40 #include "xfs_itable.h"
41 #include "xfs_dir2_data.h"
42 #include "xfs_dir2_leaf.h"
43 #include "xfs_dir2_block.h"
44 #include "xfs_inode_item.h"
45 #include "xfs_extfree_item.h"
46 #include "xfs_alloc.h"
47 #include "xfs_bmap.h"
48 #include "xfs_rtalloc.h"
49 #include "xfs_error.h"
50 #include "xfs_attr_leaf.h"
51 #include "xfs_rw.h"
52 #include "xfs_quota.h"
53 #include "xfs_trans_space.h"
54 #include "xfs_buf_item.h"
55 #include "xfs_filestream.h"
56 #include "xfs_vnodeops.h"
57
58
59 #ifdef DEBUG
60 STATIC void
61 xfs_bmap_check_leaf_extents(xfs_btree_cur_t *cur, xfs_inode_t *ip, int whichfork);
62 #endif
63
64 kmem_zone_t             *xfs_bmap_free_item_zone;
65
66 /*
67  * Prototypes for internal bmap routines.
68  */
69
70
71 /*
72  * Called from xfs_bmap_add_attrfork to handle extents format files.
73  */
74 STATIC int                                      /* error */
75 xfs_bmap_add_attrfork_extents(
76         xfs_trans_t             *tp,            /* transaction pointer */
77         xfs_inode_t             *ip,            /* incore inode pointer */
78         xfs_fsblock_t           *firstblock,    /* first block allocated */
79         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
80         int                     *flags);        /* inode logging flags */
81
82 /*
83  * Called from xfs_bmap_add_attrfork to handle local format files.
84  */
85 STATIC int                                      /* error */
86 xfs_bmap_add_attrfork_local(
87         xfs_trans_t             *tp,            /* transaction pointer */
88         xfs_inode_t             *ip,            /* incore inode pointer */
89         xfs_fsblock_t           *firstblock,    /* first block allocated */
90         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
91         int                     *flags);        /* inode logging flags */
92
93 /*
94  * Called by xfs_bmapi to update file extent records and the btree
95  * after allocating space (or doing a delayed allocation).
96  */
97 STATIC int                              /* error */
98 xfs_bmap_add_extent(
99         xfs_inode_t             *ip,    /* incore inode pointer */
100         xfs_extnum_t            idx,    /* extent number to update/insert */
101         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
102         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
103         xfs_fsblock_t           *first, /* pointer to firstblock variable */
104         xfs_bmap_free_t         *flist, /* list of extents to be freed */
105         int                     *logflagsp, /* inode logging flags */
106         xfs_extdelta_t          *delta, /* Change made to incore extents */
107         int                     whichfork, /* data or attr fork */
108         int                     rsvd);  /* OK to allocate reserved blocks */
109
110 /*
111  * Called by xfs_bmap_add_extent to handle cases converting a delayed
112  * allocation to a real allocation.
113  */
114 STATIC int                              /* error */
115 xfs_bmap_add_extent_delay_real(
116         xfs_inode_t             *ip,    /* incore inode pointer */
117         xfs_extnum_t            idx,    /* extent number to update/insert */
118         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
119         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
120         xfs_filblks_t           *dnew,  /* new delayed-alloc indirect blocks */
121         xfs_fsblock_t           *first, /* pointer to firstblock variable */
122         xfs_bmap_free_t         *flist, /* list of extents to be freed */
123         int                     *logflagsp, /* inode logging flags */
124         xfs_extdelta_t          *delta, /* Change made to incore extents */
125         int                     rsvd);  /* OK to allocate reserved blocks */
126
127 /*
128  * Called by xfs_bmap_add_extent to handle cases converting a hole
129  * to a delayed allocation.
130  */
131 STATIC int                              /* error */
132 xfs_bmap_add_extent_hole_delay(
133         xfs_inode_t             *ip,    /* incore inode pointer */
134         xfs_extnum_t            idx,    /* extent number to update/insert */
135         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
136         int                     *logflagsp,/* inode logging flags */
137         xfs_extdelta_t          *delta, /* Change made to incore extents */
138         int                     rsvd);  /* OK to allocate reserved blocks */
139
140 /*
141  * Called by xfs_bmap_add_extent to handle cases converting a hole
142  * to a real allocation.
143  */
144 STATIC int                              /* error */
145 xfs_bmap_add_extent_hole_real(
146         xfs_inode_t             *ip,    /* incore inode pointer */
147         xfs_extnum_t            idx,    /* extent number to update/insert */
148         xfs_btree_cur_t         *cur,   /* if null, not a btree */
149         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
150         int                     *logflagsp, /* inode logging flags */
151         xfs_extdelta_t          *delta, /* Change made to incore extents */
152         int                     whichfork); /* data or attr fork */
153
154 /*
155  * Called by xfs_bmap_add_extent to handle cases converting an unwritten
156  * allocation to a real allocation or vice versa.
157  */
158 STATIC int                              /* error */
159 xfs_bmap_add_extent_unwritten_real(
160         xfs_inode_t             *ip,    /* incore inode pointer */
161         xfs_extnum_t            idx,    /* extent number to update/insert */
162         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
163         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
164         int                     *logflagsp, /* inode logging flags */
165         xfs_extdelta_t          *delta); /* Change made to incore extents */
166
167 /*
168  * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
169  * It figures out where to ask the underlying allocator to put the new extent.
170  */
171 STATIC int                              /* error */
172 xfs_bmap_alloc(
173         xfs_bmalloca_t          *ap);   /* bmap alloc argument struct */
174
175 /*
176  * Transform a btree format file with only one leaf node, where the
177  * extents list will fit in the inode, into an extents format file.
178  * Since the file extents are already in-core, all we have to do is
179  * give up the space for the btree root and pitch the leaf block.
180  */
181 STATIC int                              /* error */
182 xfs_bmap_btree_to_extents(
183         xfs_trans_t             *tp,    /* transaction pointer */
184         xfs_inode_t             *ip,    /* incore inode pointer */
185         xfs_btree_cur_t         *cur,   /* btree cursor */
186         int                     *logflagsp, /* inode logging flags */
187         int                     whichfork); /* data or attr fork */
188
189 /*
190  * Called by xfs_bmapi to update file extent records and the btree
191  * after removing space (or undoing a delayed allocation).
192  */
193 STATIC int                              /* error */
194 xfs_bmap_del_extent(
195         xfs_inode_t             *ip,    /* incore inode pointer */
196         xfs_trans_t             *tp,    /* current trans pointer */
197         xfs_extnum_t            idx,    /* extent number to update/insert */
198         xfs_bmap_free_t         *flist, /* list of extents to be freed */
199         xfs_btree_cur_t         *cur,   /* if null, not a btree */
200         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
201         int                     *logflagsp,/* inode logging flags */
202         xfs_extdelta_t          *delta, /* Change made to incore extents */
203         int                     whichfork, /* data or attr fork */
204         int                     rsvd);   /* OK to allocate reserved blocks */
205
206 /*
207  * Remove the entry "free" from the free item list.  Prev points to the
208  * previous entry, unless "free" is the head of the list.
209  */
210 STATIC void
211 xfs_bmap_del_free(
212         xfs_bmap_free_t         *flist, /* free item list header */
213         xfs_bmap_free_item_t    *prev,  /* previous item on list, if any */
214         xfs_bmap_free_item_t    *free); /* list item to be freed */
215
216 /*
217  * Convert an extents-format file into a btree-format file.
218  * The new file will have a root block (in the inode) and a single child block.
219  */
220 STATIC int                                      /* error */
221 xfs_bmap_extents_to_btree(
222         xfs_trans_t             *tp,            /* transaction pointer */
223         xfs_inode_t             *ip,            /* incore inode pointer */
224         xfs_fsblock_t           *firstblock,    /* first-block-allocated */
225         xfs_bmap_free_t         *flist,         /* blocks freed in xaction */
226         xfs_btree_cur_t         **curp,         /* cursor returned to caller */
227         int                     wasdel,         /* converting a delayed alloc */
228         int                     *logflagsp,     /* inode logging flags */
229         int                     whichfork);     /* data or attr fork */
230
231 /*
232  * Convert a local file to an extents file.
233  * This code is sort of bogus, since the file data needs to get
234  * logged so it won't be lost.  The bmap-level manipulations are ok, though.
235  */
236 STATIC int                              /* error */
237 xfs_bmap_local_to_extents(
238         xfs_trans_t     *tp,            /* transaction pointer */
239         xfs_inode_t     *ip,            /* incore inode pointer */
240         xfs_fsblock_t   *firstblock,    /* first block allocated in xaction */
241         xfs_extlen_t    total,          /* total blocks needed by transaction */
242         int             *logflagsp,     /* inode logging flags */
243         int             whichfork);     /* data or attr fork */
244
245 /*
246  * Search the extents list for the inode, for the extent containing bno.
247  * If bno lies in a hole, point to the next entry.  If bno lies past eof,
248  * *eofp will be set, and *prevp will contain the last entry (null if none).
249  * Else, *lastxp will be set to the index of the found
250  * entry; *gotp will contain the entry.
251  */
252 STATIC xfs_bmbt_rec_host_t *            /* pointer to found extent entry */
253 xfs_bmap_search_extents(
254         xfs_inode_t     *ip,            /* incore inode pointer */
255         xfs_fileoff_t   bno,            /* block number searched for */
256         int             whichfork,      /* data or attr fork */
257         int             *eofp,          /* out: end of file found */
258         xfs_extnum_t    *lastxp,        /* out: last extent index */
259         xfs_bmbt_irec_t *gotp,          /* out: extent entry found */
260         xfs_bmbt_irec_t *prevp);        /* out: previous extent entry found */
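/*
 * Illustrative sketch (added; not part of the original source): a typical
 * caller positions itself in the in-core extent list like this.  The local
 * variable names are hypothetical, but the call matches the declaration
 * above.
 *
 *	int			eof;
 *	xfs_extnum_t		lastx;
 *	xfs_bmbt_irec_t		got, prev;
 *	xfs_bmbt_rec_host_t	*ep;
 *
 *	ep = xfs_bmap_search_extents(ip, bno, XFS_DATA_FORK,
 *				     &eof, &lastx, &got, &prev);
 *	if (eof)
 *		;	/* bno is beyond the last extent; prev holds it */
 */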
261
262 /*
263  * Check the last inode extent to determine whether this allocation will result
264  * in blocks being allocated at the end of the file. When we allocate new data
265  * blocks at the end of the file which do not start at the previous data block,
266  * we will try to align the new blocks at stripe unit boundaries.
267  */
268 STATIC int                              /* error */
269 xfs_bmap_isaeof(
270         xfs_inode_t     *ip,            /* incore inode pointer */
271         xfs_fileoff_t   off,            /* file offset in fsblocks */
272         int             whichfork,      /* data or attribute fork */
273         char            *aeof);         /* return value */
274
275 #ifdef XFS_BMAP_TRACE
276 /*
277  * Add bmap trace entry prior to a call to xfs_iext_remove.
278  */
279 STATIC void
280 xfs_bmap_trace_delete(
281         const char      *fname,         /* function name */
282         char            *desc,          /* operation description */
283         xfs_inode_t     *ip,            /* incore inode pointer */
284         xfs_extnum_t    idx,            /* index of entry(entries) deleted */
285         xfs_extnum_t    cnt,            /* count of entries deleted, 1 or 2 */
286         int             whichfork);     /* data or attr fork */
287
288 /*
289  * Add bmap trace entry prior to a call to xfs_iext_insert, or
290  * reading in the extents list from the disk (in the btree).
291  */
292 STATIC void
293 xfs_bmap_trace_insert(
294         const char      *fname,         /* function name */
295         char            *desc,          /* operation description */
296         xfs_inode_t     *ip,            /* incore inode pointer */
297         xfs_extnum_t    idx,            /* index of entry(entries) inserted */
298         xfs_extnum_t    cnt,            /* count of entries inserted, 1 or 2 */
299         xfs_bmbt_irec_t *r1,            /* inserted record 1 */
300         xfs_bmbt_irec_t *r2,            /* inserted record 2 or null */
301         int             whichfork);     /* data or attr fork */
302
303 /*
304  * Add bmap trace entry after updating an extent record in place.
305  */
306 STATIC void
307 xfs_bmap_trace_post_update(
308         const char      *fname,         /* function name */
309         char            *desc,          /* operation description */
310         xfs_inode_t     *ip,            /* incore inode pointer */
311         xfs_extnum_t    idx,            /* index of entry updated */
312         int             whichfork);     /* data or attr fork */
313
314 /*
315  * Add bmap trace entry prior to updating an extent record in place.
316  */
317 STATIC void
318 xfs_bmap_trace_pre_update(
319         const char      *fname,         /* function name */
320         char            *desc,          /* operation description */
321         xfs_inode_t     *ip,            /* incore inode pointer */
322         xfs_extnum_t    idx,            /* index of entry to be updated */
323         int             whichfork);     /* data or attr fork */
324
325 #define XFS_BMAP_TRACE_DELETE(d,ip,i,c,w)       \
326         xfs_bmap_trace_delete(__func__,d,ip,i,c,w)
327 #define XFS_BMAP_TRACE_INSERT(d,ip,i,c,r1,r2,w) \
328         xfs_bmap_trace_insert(__func__,d,ip,i,c,r1,r2,w)
329 #define XFS_BMAP_TRACE_POST_UPDATE(d,ip,i,w)    \
330         xfs_bmap_trace_post_update(__func__,d,ip,i,w)
331 #define XFS_BMAP_TRACE_PRE_UPDATE(d,ip,i,w)     \
332         xfs_bmap_trace_pre_update(__func__,d,ip,i,w)
333 #else
334 #define XFS_BMAP_TRACE_DELETE(d,ip,i,c,w)
335 #define XFS_BMAP_TRACE_INSERT(d,ip,i,c,r1,r2,w)
336 #define XFS_BMAP_TRACE_POST_UPDATE(d,ip,i,w)
337 #define XFS_BMAP_TRACE_PRE_UPDATE(d,ip,i,w)
338 #endif  /* XFS_BMAP_TRACE */
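/*
 * Illustrative sketch (added; not part of the original source): the update
 * macros above are used in pairs around an in-place change to an extent
 * record, following the pattern used later in this file:
 *
 *	XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx, XFS_DATA_FORK);
 *	xfs_bmbt_set_blockcount(ep, temp);
 *	XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx, XFS_DATA_FORK);
 *
 * When XFS_BMAP_TRACE is not defined they compile away to nothing.
 */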
339
340 /*
341  * Compute the worst-case number of indirect blocks that will be used
342  * for ip's delayed extent of length "len".
343  */
344 STATIC xfs_filblks_t
345 xfs_bmap_worst_indlen(
346         xfs_inode_t             *ip,    /* incore inode pointer */
347         xfs_filblks_t           len);   /* delayed extent length */
348
349 #ifdef DEBUG
350 /*
351  * Perform various validation checks on the values being returned
352  * from xfs_bmapi().
353  */
354 STATIC void
355 xfs_bmap_validate_ret(
356         xfs_fileoff_t           bno,
357         xfs_filblks_t           len,
358         int                     flags,
359         xfs_bmbt_irec_t         *mval,
360         int                     nmap,
361         int                     ret_nmap);
362 #else
363 #define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
364 #endif /* DEBUG */
365
366 #if defined(XFS_RW_TRACE)
367 STATIC void
368 xfs_bunmap_trace(
369         xfs_inode_t             *ip,
370         xfs_fileoff_t           bno,
371         xfs_filblks_t           len,
372         int                     flags,
373         inst_t                  *ra);
374 #else
375 #define xfs_bunmap_trace(ip, bno, len, flags, ra)
376 #endif  /* XFS_RW_TRACE */
377
378 STATIC int
379 xfs_bmap_count_tree(
380         xfs_mount_t     *mp,
381         xfs_trans_t     *tp,
382         xfs_ifork_t     *ifp,
383         xfs_fsblock_t   blockno,
384         int             levelin,
385         int             *count);
386
387 STATIC void
388 xfs_bmap_count_leaves(
389         xfs_ifork_t             *ifp,
390         xfs_extnum_t            idx,
391         int                     numrecs,
392         int                     *count);
393
394 STATIC void
395 xfs_bmap_disk_count_leaves(
396         struct xfs_mount        *mp,
397         struct xfs_btree_block  *block,
398         int                     numrecs,
399         int                     *count);
400
401 /*
402  * Bmap internal routines.
403  */
404
405 STATIC int                              /* error */
406 xfs_bmbt_lookup_eq(
407         struct xfs_btree_cur    *cur,
408         xfs_fileoff_t           off,
409         xfs_fsblock_t           bno,
410         xfs_filblks_t           len,
411         int                     *stat)  /* success/failure */
412 {
413         cur->bc_rec.b.br_startoff = off;
414         cur->bc_rec.b.br_startblock = bno;
415         cur->bc_rec.b.br_blockcount = len;
416         return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
417 }
418
419 STATIC int                              /* error */
420 xfs_bmbt_lookup_ge(
421         struct xfs_btree_cur    *cur,
422         xfs_fileoff_t           off,
423         xfs_fsblock_t           bno,
424         xfs_filblks_t           len,
425         int                     *stat)  /* success/failure */
426 {
427         cur->bc_rec.b.br_startoff = off;
428         cur->bc_rec.b.br_startblock = bno;
429         cur->bc_rec.b.br_blockcount = len;
430         return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
431 }
432
433 /*
434  * Update the record referred to by cur to the value given
435  * by [off, bno, len, state].
436  * This either works (return 0) or gets an EFSCORRUPTED error.
437  */
438 STATIC int
439 xfs_bmbt_update(
440         struct xfs_btree_cur    *cur,
441         xfs_fileoff_t           off,
442         xfs_fsblock_t           bno,
443         xfs_filblks_t           len,
444         xfs_exntst_t            state)
445 {
446         union xfs_btree_rec     rec;
447
448         xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
449         return xfs_btree_update(cur, &rec);
450 }
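/*
 * Illustrative sketch (added; not part of the original source): the lookup
 * and update helpers above are normally used as a pair.  The cursor is first
 * positioned on an existing record, the "stat" result is checked, and only
 * then is the record rewritten in place; off/bno/len are placeholders here.
 *
 *	int	stat;
 *
 *	error = xfs_bmbt_lookup_eq(cur, off, bno, len, &stat);
 *	if (!error && stat == 1)
 *		error = xfs_bmbt_update(cur, off, bno, len, XFS_EXT_NORM);
 */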
451
452 /*
453  * Called from xfs_bmap_add_attrfork to handle btree format files.
454  */
455 STATIC int                                      /* error */
456 xfs_bmap_add_attrfork_btree(
457         xfs_trans_t             *tp,            /* transaction pointer */
458         xfs_inode_t             *ip,            /* incore inode pointer */
459         xfs_fsblock_t           *firstblock,    /* first block allocated */
460         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
461         int                     *flags)         /* inode logging flags */
462 {
463         xfs_btree_cur_t         *cur;           /* btree cursor */
464         int                     error;          /* error return value */
465         xfs_mount_t             *mp;            /* file system mount struct */
466         int                     stat;           /* newroot status */
467
468         mp = ip->i_mount;
469         if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
470                 *flags |= XFS_ILOG_DBROOT;
471         else {
472                 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
473                 cur->bc_private.b.flist = flist;
474                 cur->bc_private.b.firstblock = *firstblock;
475                 if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
476                         goto error0;
477                 /* must be at least one entry */
478                 XFS_WANT_CORRUPTED_GOTO(stat == 1, error0);
479                 if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
480                         goto error0;
481                 if (stat == 0) {
482                         xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
483                         return XFS_ERROR(ENOSPC);
484                 }
485                 *firstblock = cur->bc_private.b.firstblock;
486                 cur->bc_private.b.allocated = 0;
487                 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
488         }
489         return 0;
490 error0:
491         xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
492         return error;
493 }
494
495 /*
496  * Called from xfs_bmap_add_attrfork to handle extents format files.
497  */
498 STATIC int                                      /* error */
499 xfs_bmap_add_attrfork_extents(
500         xfs_trans_t             *tp,            /* transaction pointer */
501         xfs_inode_t             *ip,            /* incore inode pointer */
502         xfs_fsblock_t           *firstblock,    /* first block allocated */
503         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
504         int                     *flags)         /* inode logging flags */
505 {
506         xfs_btree_cur_t         *cur;           /* bmap btree cursor */
507         int                     error;          /* error return value */
508
509         if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
510                 return 0;
511         cur = NULL;
512         error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0,
513                 flags, XFS_DATA_FORK);
514         if (cur) {
515                 cur->bc_private.b.allocated = 0;
516                 xfs_btree_del_cursor(cur,
517                         error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
518         }
519         return error;
520 }
521
522 /*
523  * Called from xfs_bmap_add_attrfork to handle local format files.
524  */
525 STATIC int                                      /* error */
526 xfs_bmap_add_attrfork_local(
527         xfs_trans_t             *tp,            /* transaction pointer */
528         xfs_inode_t             *ip,            /* incore inode pointer */
529         xfs_fsblock_t           *firstblock,    /* first block allocated */
530         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
531         int                     *flags)         /* inode logging flags */
532 {
533         xfs_da_args_t           dargs;          /* args for dir/attr code */
534         int                     error;          /* error return value */
535         xfs_mount_t             *mp;            /* mount structure pointer */
536
537         if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
538                 return 0;
539         if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
540                 mp = ip->i_mount;
541                 memset(&dargs, 0, sizeof(dargs));
542                 dargs.dp = ip;
543                 dargs.firstblock = firstblock;
544                 dargs.flist = flist;
545                 dargs.total = mp->m_dirblkfsbs;
546                 dargs.whichfork = XFS_DATA_FORK;
547                 dargs.trans = tp;
548                 error = xfs_dir2_sf_to_block(&dargs);
549         } else
550                 error = xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
551                         XFS_DATA_FORK);
552         return error;
553 }
554
555 /*
556  * Called by xfs_bmapi to update file extent records and the btree
557  * after allocating space (or doing a delayed allocation).
558  */
559 STATIC int                              /* error */
560 xfs_bmap_add_extent(
561         xfs_inode_t             *ip,    /* incore inode pointer */
562         xfs_extnum_t            idx,    /* extent number to update/insert */
563         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
564         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
565         xfs_fsblock_t           *first, /* pointer to firstblock variable */
566         xfs_bmap_free_t         *flist, /* list of extents to be freed */
567         int                     *logflagsp, /* inode logging flags */
568         xfs_extdelta_t          *delta, /* Change made to incore extents */
569         int                     whichfork, /* data or attr fork */
570         int                     rsvd)   /* OK to use reserved data blocks */
571 {
572         xfs_btree_cur_t         *cur;   /* btree cursor or null */
573         xfs_filblks_t           da_new; /* new count del alloc blocks used */
574         xfs_filblks_t           da_old; /* old count del alloc blocks used */
575         int                     error;  /* error return value */
576         xfs_ifork_t             *ifp;   /* inode fork ptr */
577         int                     logflags; /* returned value */
578         xfs_extnum_t            nextents; /* number of extents in file now */
579
580         XFS_STATS_INC(xs_add_exlist);
581         cur = *curp;
582         ifp = XFS_IFORK_PTR(ip, whichfork);
583         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
584         ASSERT(idx <= nextents);
585         da_old = da_new = 0;
586         error = 0;
587         /*
588          * This is the first extent added to a new/empty file.
589          * Special case this one, so other routines get to assume there are
590          * already extents in the list.
591          */
592         if (nextents == 0) {
593                 XFS_BMAP_TRACE_INSERT("insert empty", ip, 0, 1, new, NULL,
594                         whichfork);
595                 xfs_iext_insert(ip, 0, 1, new,
596                                 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
597
598                 ASSERT(cur == NULL);
599                 ifp->if_lastex = 0;
600                 if (!isnullstartblock(new->br_startblock)) {
601                         XFS_IFORK_NEXT_SET(ip, whichfork, 1);
602                         logflags = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
603                 } else
604                         logflags = 0;
605                 /* DELTA: single new extent */
606                 if (delta) {
607                         if (delta->xed_startoff > new->br_startoff)
608                                 delta->xed_startoff = new->br_startoff;
609                         if (delta->xed_blockcount <
610                                         new->br_startoff + new->br_blockcount)
611                                 delta->xed_blockcount = new->br_startoff +
612                                                 new->br_blockcount;
613                 }
614         }
615         /*
616          * Any kind of new delayed allocation goes here.
617          */
618         else if (isnullstartblock(new->br_startblock)) {
619                 if (cur)
620                         ASSERT((cur->bc_private.b.flags &
621                                 XFS_BTCUR_BPRV_WASDEL) == 0);
622                 if ((error = xfs_bmap_add_extent_hole_delay(ip, idx, new,
623                                 &logflags, delta, rsvd)))
624                         goto done;
625         }
626         /*
627          * Real allocation off the end of the file.
628          */
629         else if (idx == nextents) {
630                 if (cur)
631                         ASSERT((cur->bc_private.b.flags &
632                                 XFS_BTCUR_BPRV_WASDEL) == 0);
633                 if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new,
634                                 &logflags, delta, whichfork)))
635                         goto done;
636         } else {
637                 xfs_bmbt_irec_t prev;   /* old extent at offset idx */
638
639                 /*
640                  * Get the record referred to by idx.
641                  */
642                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &prev);
643                 /*
644                  * If it's a real allocation record, and the new allocation ends
645                  * after the start of the referred to record, then we're filling
646                  * in a delayed or unwritten allocation with a real one, or
647                  * converting real back to unwritten.
648                  */
649                 if (!isnullstartblock(new->br_startblock) &&
650                     new->br_startoff + new->br_blockcount > prev.br_startoff) {
651                         if (prev.br_state != XFS_EXT_UNWRITTEN &&
652                             isnullstartblock(prev.br_startblock)) {
653                                 da_old = startblockval(prev.br_startblock);
654                                 if (cur)
655                                         ASSERT(cur->bc_private.b.flags &
656                                                 XFS_BTCUR_BPRV_WASDEL);
657                                 if ((error = xfs_bmap_add_extent_delay_real(ip,
658                                         idx, &cur, new, &da_new, first, flist,
659                                         &logflags, delta, rsvd)))
660                                         goto done;
661                         } else if (new->br_state == XFS_EXT_NORM) {
662                                 ASSERT(new->br_state == XFS_EXT_NORM);
663                                 if ((error = xfs_bmap_add_extent_unwritten_real(
664                                         ip, idx, &cur, new, &logflags, delta)))
665                                         goto done;
666                         } else {
667                                 ASSERT(new->br_state == XFS_EXT_UNWRITTEN);
668                                 if ((error = xfs_bmap_add_extent_unwritten_real(
669                                         ip, idx, &cur, new, &logflags, delta)))
670                                         goto done;
671                         }
672                         ASSERT(*curp == cur || *curp == NULL);
673                 }
674                 /*
675                  * Otherwise we're filling in a hole with an allocation.
676                  */
677                 else {
678                         if (cur)
679                                 ASSERT((cur->bc_private.b.flags &
680                                         XFS_BTCUR_BPRV_WASDEL) == 0);
681                         if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur,
682                                         new, &logflags, delta, whichfork)))
683                                 goto done;
684                 }
685         }
686
687         ASSERT(*curp == cur || *curp == NULL);
688         /*
689          * Convert to a btree if necessary.
690          */
691         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
692             XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max) {
693                 int     tmp_logflags;   /* partial log flag return val */
694
695                 ASSERT(cur == NULL);
696                 error = xfs_bmap_extents_to_btree(ip->i_transp, ip, first,
697                         flist, &cur, da_old > 0, &tmp_logflags, whichfork);
698                 logflags |= tmp_logflags;
699                 if (error)
700                         goto done;
701         }
702         /*
703          * Adjust for changes in reserved delayed indirect blocks.
704          * Nothing to do for disk quotas here.
705          */
706         if (da_old || da_new) {
707                 xfs_filblks_t   nblks;
708
709                 nblks = da_new;
710                 if (cur)
711                         nblks += cur->bc_private.b.allocated;
712                 ASSERT(nblks <= da_old);
713                 if (nblks < da_old)
714                         xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS,
715                                 (int64_t)(da_old - nblks), rsvd);
716         }
717         /*
718          * Clear out the allocated field, done with it now in any case.
719          */
720         if (cur) {
721                 cur->bc_private.b.allocated = 0;
722                 *curp = cur;
723         }
724 done:
725 #ifdef DEBUG
726         if (!error)
727                 xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
728 #endif
729         *logflagsp = logflags;
730         return error;
731 }
732
733 /*
734  * Called by xfs_bmap_add_extent to handle cases converting a delayed
735  * allocation to a real allocation.
736  */
737 STATIC int                              /* error */
738 xfs_bmap_add_extent_delay_real(
739         xfs_inode_t             *ip,    /* incore inode pointer */
740         xfs_extnum_t            idx,    /* extent number to update/insert */
741         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
742         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
743         xfs_filblks_t           *dnew,  /* new delayed-alloc indirect blocks */
744         xfs_fsblock_t           *first, /* pointer to firstblock variable */
745         xfs_bmap_free_t         *flist, /* list of extents to be freed */
746         int                     *logflagsp, /* inode logging flags */
747         xfs_extdelta_t          *delta, /* Change made to incore extents */
748         int                     rsvd)   /* OK to use reserved data block allocation */
749 {
750         xfs_btree_cur_t         *cur;   /* btree cursor */
751         int                     diff;   /* temp value */
752         xfs_bmbt_rec_host_t     *ep;    /* extent entry for idx */
753         int                     error;  /* error return value */
754         int                     i;      /* temp state */
755         xfs_ifork_t             *ifp;   /* inode fork pointer */
756         xfs_fileoff_t           new_endoff;     /* end offset of new entry */
757         xfs_bmbt_irec_t         r[3];   /* neighbor extent entries */
758                                         /* left is 0, right is 1, prev is 2 */
759         int                     rval=0; /* return value (logging flags) */
760         int                     state = 0;/* state bits, accessed thru macros */
761         xfs_filblks_t           temp=0; /* value for dnew calculations */
762         xfs_filblks_t           temp2=0;/* value for dnew calculations */
763         int                     tmp_rval;       /* partial logging flags */
764
765 #define LEFT            r[0]
766 #define RIGHT           r[1]
767 #define PREV            r[2]
768
769         /*
770          * Set up a bunch of variables to make the tests simpler.
771          */
772         cur = *curp;
773         ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
774         ep = xfs_iext_get_ext(ifp, idx);
775         xfs_bmbt_get_all(ep, &PREV);
776         new_endoff = new->br_startoff + new->br_blockcount;
777         ASSERT(PREV.br_startoff <= new->br_startoff);
778         ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
779
780         /*
781          * Set flags determining what part of the previous delayed allocation
782          * extent is being replaced by a real allocation.
783          */
784         if (PREV.br_startoff == new->br_startoff)
785                 state |= BMAP_LEFT_FILLING;
786         if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
787                 state |= BMAP_RIGHT_FILLING;
788
789         /*
790          * Check and set flags if this segment has a left neighbor.
791          * Don't set contiguous if the combined extent would be too large.
792          */
793         if (idx > 0) {
794                 state |= BMAP_LEFT_VALID;
795                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT);
796
797                 if (isnullstartblock(LEFT.br_startblock))
798                         state |= BMAP_LEFT_DELAY;
799         }
800
801         if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
802             LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
803             LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
804             LEFT.br_state == new->br_state &&
805             LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
806                 state |= BMAP_LEFT_CONTIG;
807
808         /*
809          * Check and set flags if this segment has a right neighbor.
810          * Don't set contiguous if the combined extent would be too large.
811          * Also check for all-three-contiguous being too large.
812          */
813         if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
814                 state |= BMAP_RIGHT_VALID;
815                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT);
816
817                 if (isnullstartblock(RIGHT.br_startblock))
818                         state |= BMAP_RIGHT_DELAY;
819         }
820
821         if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
822             new_endoff == RIGHT.br_startoff &&
823             new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
824             new->br_state == RIGHT.br_state &&
825             new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
826             ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
827                        BMAP_RIGHT_FILLING)) !=
828                       (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
829                        BMAP_RIGHT_FILLING) ||
830              LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
831                         <= MAXEXTLEN))
832                 state |= BMAP_RIGHT_CONTIG;
833
834         error = 0;
835         /*
836          * Switch out based on the FILLING and CONTIG state bits.
837          */
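        /*
         * Illustrative note (added): for example, if the new real extent
         * starts at PREV.br_startoff, stops short of the end of PREV, and
         * abuts a real LEFT neighbor, state is
         * BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG and the case below merges
         * the new blocks into LEFT while shrinking the remaining delayed
         * extent.
         */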
838         switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
839                          BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
840         case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
841              BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
842                 /*
843                  * Filling in all of a previously delayed allocation extent.
844                  * The left and right neighbors are both contiguous with new.
845                  */
846                 XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC|RC", ip, idx - 1,
847                         XFS_DATA_FORK);
848                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
849                         LEFT.br_blockcount + PREV.br_blockcount +
850                         RIGHT.br_blockcount);
851                 XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC|RC", ip, idx - 1,
852                         XFS_DATA_FORK);
853                 XFS_BMAP_TRACE_DELETE("LF|RF|LC|RC", ip, idx, 2, XFS_DATA_FORK);
854                 xfs_iext_remove(ip, idx, 2, state);
855                 ip->i_df.if_lastex = idx - 1;
856                 ip->i_d.di_nextents--;
857                 if (cur == NULL)
858                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
859                 else {
860                         rval = XFS_ILOG_CORE;
861                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
862                                         RIGHT.br_startblock,
863                                         RIGHT.br_blockcount, &i)))
864                                 goto done;
865                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
866                         if ((error = xfs_btree_delete(cur, &i)))
867                                 goto done;
868                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
869                         if ((error = xfs_btree_decrement(cur, 0, &i)))
870                                 goto done;
871                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
872                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
873                                         LEFT.br_startblock,
874                                         LEFT.br_blockcount +
875                                         PREV.br_blockcount +
876                                         RIGHT.br_blockcount, LEFT.br_state)))
877                                 goto done;
878                 }
879                 *dnew = 0;
880                 /* DELTA: Three in-core extents are replaced by one. */
881                 temp = LEFT.br_startoff;
882                 temp2 = LEFT.br_blockcount +
883                         PREV.br_blockcount +
884                         RIGHT.br_blockcount;
885                 break;
886
887         case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
888                 /*
889                  * Filling in all of a previously delayed allocation extent.
890                  * The left neighbor is contiguous, the right is not.
891                  */
892                 XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC", ip, idx - 1,
893                         XFS_DATA_FORK);
894                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
895                         LEFT.br_blockcount + PREV.br_blockcount);
896                 XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC", ip, idx - 1,
897                         XFS_DATA_FORK);
898                 ip->i_df.if_lastex = idx - 1;
899                 XFS_BMAP_TRACE_DELETE("LF|RF|LC", ip, idx, 1, XFS_DATA_FORK);
900                 xfs_iext_remove(ip, idx, 1, state);
901                 if (cur == NULL)
902                         rval = XFS_ILOG_DEXT;
903                 else {
904                         rval = 0;
905                         if ((error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff,
906                                         LEFT.br_startblock, LEFT.br_blockcount,
907                                         &i)))
908                                 goto done;
909                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
910                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
911                                         LEFT.br_startblock,
912                                         LEFT.br_blockcount +
913                                         PREV.br_blockcount, LEFT.br_state)))
914                                 goto done;
915                 }
916                 *dnew = 0;
917                 /* DELTA: Two in-core extents are replaced by one. */
918                 temp = LEFT.br_startoff;
919                 temp2 = LEFT.br_blockcount +
920                         PREV.br_blockcount;
921                 break;
922
923         case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
924                 /*
925                  * Filling in all of a previously delayed allocation extent.
926                  * The right neighbor is contiguous, the left is not.
927                  */
928                 XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|RC", ip, idx, XFS_DATA_FORK);
929                 xfs_bmbt_set_startblock(ep, new->br_startblock);
930                 xfs_bmbt_set_blockcount(ep,
931                         PREV.br_blockcount + RIGHT.br_blockcount);
932                 XFS_BMAP_TRACE_POST_UPDATE("LF|RF|RC", ip, idx, XFS_DATA_FORK);
933                 ip->i_df.if_lastex = idx;
934                 XFS_BMAP_TRACE_DELETE("LF|RF|RC", ip, idx + 1, 1, XFS_DATA_FORK);
935                 xfs_iext_remove(ip, idx + 1, 1, state);
936                 if (cur == NULL)
937                         rval = XFS_ILOG_DEXT;
938                 else {
939                         rval = 0;
940                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
941                                         RIGHT.br_startblock,
942                                         RIGHT.br_blockcount, &i)))
943                                 goto done;
944                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
945                         if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
946                                         new->br_startblock,
947                                         PREV.br_blockcount +
948                                         RIGHT.br_blockcount, PREV.br_state)))
949                                 goto done;
950                 }
951                 *dnew = 0;
952                 /* DELTA: Two in-core extents are replaced by one. */
953                 temp = PREV.br_startoff;
954                 temp2 = PREV.br_blockcount +
955                         RIGHT.br_blockcount;
956                 break;
957
958         case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
959                 /*
960                  * Filling in all of a previously delayed allocation extent.
961                  * Neither the left nor right neighbors are contiguous with
962                  * the new one.
963                  */
964                 XFS_BMAP_TRACE_PRE_UPDATE("LF|RF", ip, idx, XFS_DATA_FORK);
965                 xfs_bmbt_set_startblock(ep, new->br_startblock);
966                 XFS_BMAP_TRACE_POST_UPDATE("LF|RF", ip, idx, XFS_DATA_FORK);
967                 ip->i_df.if_lastex = idx;
968                 ip->i_d.di_nextents++;
969                 if (cur == NULL)
970                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
971                 else {
972                         rval = XFS_ILOG_CORE;
973                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
974                                         new->br_startblock, new->br_blockcount,
975                                         &i)))
976                                 goto done;
977                         XFS_WANT_CORRUPTED_GOTO(i == 0, done);
978                         cur->bc_rec.b.br_state = XFS_EXT_NORM;
979                         if ((error = xfs_btree_insert(cur, &i)))
980                                 goto done;
981                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
982                 }
983                 *dnew = 0;
984                 /* DELTA: The in-core extent described by new changed type. */
985                 temp = new->br_startoff;
986                 temp2 = new->br_blockcount;
987                 break;
988
989         case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
990                 /*
991                  * Filling in the first part of a previous delayed allocation.
992                  * The left neighbor is contiguous.
993                  */
994                 XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx - 1, XFS_DATA_FORK);
995                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
996                         LEFT.br_blockcount + new->br_blockcount);
997                 xfs_bmbt_set_startoff(ep,
998                         PREV.br_startoff + new->br_blockcount);
999                 XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx - 1, XFS_DATA_FORK);
1000                 temp = PREV.br_blockcount - new->br_blockcount;
1001                 XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx, XFS_DATA_FORK);
1002                 xfs_bmbt_set_blockcount(ep, temp);
1003                 ip->i_df.if_lastex = idx - 1;
1004                 if (cur == NULL)
1005                         rval = XFS_ILOG_DEXT;
1006                 else {
1007                         rval = 0;
1008                         if ((error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff,
1009                                         LEFT.br_startblock, LEFT.br_blockcount,
1010                                         &i)))
1011                                 goto done;
1012                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1013                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
1014                                         LEFT.br_startblock,
1015                                         LEFT.br_blockcount +
1016                                         new->br_blockcount,
1017                                         LEFT.br_state)))
1018                                 goto done;
1019                 }
1020                 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
1021                         startblockval(PREV.br_startblock));
1022                 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1023                 XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx, XFS_DATA_FORK);
1024                 *dnew = temp;
1025                 /* DELTA: The boundary between two in-core extents moved. */
1026                 temp = LEFT.br_startoff;
1027                 temp2 = LEFT.br_blockcount +
1028                         PREV.br_blockcount;
1029                 break;
1030
1031         case BMAP_LEFT_FILLING:
1032                 /*
1033                  * Filling in the first part of a previous delayed allocation.
1034                  * The left neighbor is not contiguous.
1035                  */
1036                 XFS_BMAP_TRACE_PRE_UPDATE("LF", ip, idx, XFS_DATA_FORK);
1037                 xfs_bmbt_set_startoff(ep, new_endoff);
1038                 temp = PREV.br_blockcount - new->br_blockcount;
1039                 xfs_bmbt_set_blockcount(ep, temp);
1040                 XFS_BMAP_TRACE_INSERT("LF", ip, idx, 1, new, NULL,
1041                         XFS_DATA_FORK);
1042                 xfs_iext_insert(ip, idx, 1, new, state);
1043                 ip->i_df.if_lastex = idx;
1044                 ip->i_d.di_nextents++;
1045                 if (cur == NULL)
1046                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1047                 else {
1048                         rval = XFS_ILOG_CORE;
1049                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1050                                         new->br_startblock, new->br_blockcount,
1051                                         &i)))
1052                                 goto done;
1053                         XFS_WANT_CORRUPTED_GOTO(i == 0, done);
1054                         cur->bc_rec.b.br_state = XFS_EXT_NORM;
1055                         if ((error = xfs_btree_insert(cur, &i)))
1056                                 goto done;
1057                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1058                 }
1059                 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1060                     ip->i_d.di_nextents > ip->i_df.if_ext_max) {
1061                         error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
1062                                         first, flist, &cur, 1, &tmp_rval,
1063                                         XFS_DATA_FORK);
1064                         rval |= tmp_rval;
1065                         if (error)
1066                                 goto done;
1067                 }
1068                 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
1069                         startblockval(PREV.br_startblock) -
1070                         (cur ? cur->bc_private.b.allocated : 0));
1071                 ep = xfs_iext_get_ext(ifp, idx + 1);
1072                 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1073                 XFS_BMAP_TRACE_POST_UPDATE("LF", ip, idx + 1, XFS_DATA_FORK);
1074                 *dnew = temp;
1075                 /* DELTA: One in-core extent is split in two. */
1076                 temp = PREV.br_startoff;
1077                 temp2 = PREV.br_blockcount;
1078                 break;
1079
1080         case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1081                 /*
1082                  * Filling in the last part of a previous delayed allocation.
1083                  * The right neighbor is contiguous with the new allocation.
1084                  */
1085                 temp = PREV.br_blockcount - new->br_blockcount;
1086                 XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx, XFS_DATA_FORK);
1087                 XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx + 1, XFS_DATA_FORK);
1088                 xfs_bmbt_set_blockcount(ep, temp);
1089                 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
1090                         new->br_startoff, new->br_startblock,
1091                         new->br_blockcount + RIGHT.br_blockcount,
1092                         RIGHT.br_state);
1093                 XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx + 1, XFS_DATA_FORK);
1094                 ip->i_df.if_lastex = idx + 1;
1095                 if (cur == NULL)
1096                         rval = XFS_ILOG_DEXT;
1097                 else {
1098                         rval = 0;
1099                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
1100                                         RIGHT.br_startblock,
1101                                         RIGHT.br_blockcount, &i)))
1102                                 goto done;
1103                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1104                         if ((error = xfs_bmbt_update(cur, new->br_startoff,
1105                                         new->br_startblock,
1106                                         new->br_blockcount +
1107                                         RIGHT.br_blockcount,
1108                                         RIGHT.br_state)))
1109                                 goto done;
1110                 }
1111                 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
1112                         startblockval(PREV.br_startblock));
1113                 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1114                 XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx, XFS_DATA_FORK);
1115                 *dnew = temp;
1116                 /* DELTA: The boundary between two in-core extents moved. */
1117                 temp = PREV.br_startoff;
1118                 temp2 = PREV.br_blockcount +
1119                         RIGHT.br_blockcount;
1120                 break;
1121
1122         case BMAP_RIGHT_FILLING:
1123                 /*
1124                  * Filling in the last part of a previous delayed allocation.
1125                  * The right neighbor is not contiguous.
1126                  */
1127                 temp = PREV.br_blockcount - new->br_blockcount;
1128                 XFS_BMAP_TRACE_PRE_UPDATE("RF", ip, idx, XFS_DATA_FORK);
1129                 xfs_bmbt_set_blockcount(ep, temp);
1130                 XFS_BMAP_TRACE_INSERT("RF", ip, idx + 1, 1, new, NULL,
1131                         XFS_DATA_FORK);
1132                 xfs_iext_insert(ip, idx + 1, 1, new, state);
1133                 ip->i_df.if_lastex = idx + 1;
1134                 ip->i_d.di_nextents++;
1135                 if (cur == NULL)
1136                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1137                 else {
1138                         rval = XFS_ILOG_CORE;
1139                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1140                                         new->br_startblock, new->br_blockcount,
1141                                         &i)))
1142                                 goto done;
1143                         XFS_WANT_CORRUPTED_GOTO(i == 0, done);
1144                         cur->bc_rec.b.br_state = XFS_EXT_NORM;
1145                         if ((error = xfs_btree_insert(cur, &i)))
1146                                 goto done;
1147                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1148                 }
1149                 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1150                     ip->i_d.di_nextents > ip->i_df.if_ext_max) {
1151                         error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
1152                                 first, flist, &cur, 1, &tmp_rval,
1153                                 XFS_DATA_FORK);
1154                         rval |= tmp_rval;
1155                         if (error)
1156                                 goto done;
1157                 }
1158                 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
1159                         startblockval(PREV.br_startblock) -
1160                         (cur ? cur->bc_private.b.allocated : 0));
1161                 ep = xfs_iext_get_ext(ifp, idx);
1162                 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1163                 XFS_BMAP_TRACE_POST_UPDATE("RF", ip, idx, XFS_DATA_FORK);
1164                 *dnew = temp;
1165                 /* DELTA: One in-core extent is split in two. */
1166                 temp = PREV.br_startoff;
1167                 temp2 = PREV.br_blockcount;
1168                 break;
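                /*
                 * Annotation (not part of the original source): a delayed
                 * allocation has no real start block yet; its startblock
                 * field instead encodes the blocks reserved for worst-case
                 * indirect (bmap btree) usage.  startblockval() reads that
                 * reservation back out, and nullstartblock() stores the
                 * recomputed worst case for the delayed range that remains
                 * after this update.
                 */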
1169
1170         case 0:
1171                 /*
1172                  * Filling in the middle part of a previous delayed allocation.
1173                  * Contiguity is impossible here.
1174                  * This case is avoided almost all the time.
1175                  */
1176                 temp = new->br_startoff - PREV.br_startoff;
1177                 XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, XFS_DATA_FORK);
1178                 xfs_bmbt_set_blockcount(ep, temp);
1179                 r[0] = *new;
1180                 r[1].br_state = PREV.br_state;
1181                 r[1].br_startblock = 0;
1182                 r[1].br_startoff = new_endoff;
1183                 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
1184                 r[1].br_blockcount = temp2;
1185                 XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 2, &r[0], &r[1],
1186                         XFS_DATA_FORK);
1187                 xfs_iext_insert(ip, idx + 1, 2, &r[0], state);
1188                 ip->i_df.if_lastex = idx + 1;
1189                 ip->i_d.di_nextents++;
1190                 if (cur == NULL)
1191                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1192                 else {
1193                         rval = XFS_ILOG_CORE;
1194                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1195                                         new->br_startblock, new->br_blockcount,
1196                                         &i)))
1197                                 goto done;
1198                         XFS_WANT_CORRUPTED_GOTO(i == 0, done);
1199                         cur->bc_rec.b.br_state = XFS_EXT_NORM;
1200                         if ((error = xfs_btree_insert(cur, &i)))
1201                                 goto done;
1202                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1203                 }
1204                 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1205                     ip->i_d.di_nextents > ip->i_df.if_ext_max) {
1206                         error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
1207                                         first, flist, &cur, 1, &tmp_rval,
1208                                         XFS_DATA_FORK);
1209                         rval |= tmp_rval;
1210                         if (error)
1211                                 goto done;
1212                 }
1213                 temp = xfs_bmap_worst_indlen(ip, temp);
1214                 temp2 = xfs_bmap_worst_indlen(ip, temp2);
1215                 diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
1216                         (cur ? cur->bc_private.b.allocated : 0));
1217                 if (diff > 0 &&
1218                     xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd)) {
1219                         /*
1220                          * Ick gross gag me with a spoon: the worst-case indirect reservation did not fit, so trim temp/temp2 until it does.
1221                          */
1222                         ASSERT(0);      /* want to see if this ever happens! */
1223                         while (diff > 0) {
1224                                 if (temp) {
1225                                         temp--;
1226                                         diff--;
1227                                         if (!diff ||
1228                                             !xfs_mod_incore_sb(ip->i_mount,
1229                                                     XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd))
1230                                                 break;
1231                                 }
1232                                 if (temp2) {
1233                                         temp2--;
1234                                         diff--;
1235                                         if (!diff ||
1236                                             !xfs_mod_incore_sb(ip->i_mount,
1237                                                     XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd))
1238                                                 break;
1239                                 }
1240                         }
1241                 }
1242                 ep = xfs_iext_get_ext(ifp, idx);
1243                 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1244                 XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, XFS_DATA_FORK);
1245                 XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx + 2, XFS_DATA_FORK);
1246                 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2),
1247                         nullstartblock((int)temp2));
1248                 XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx + 2, XFS_DATA_FORK);
1249                 *dnew = temp + temp2;
1250                 /* DELTA: One in-core extent is split in three. */
1251                 temp = PREV.br_startoff;
1252                 temp2 = PREV.br_blockcount;
1253                 break;
1254
1255         case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1256         case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1257         case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1258         case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1259         case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1260         case BMAP_LEFT_CONTIG:
1261         case BMAP_RIGHT_CONTIG:
1262                 /*
1263                  * These cases are all impossible.
1264                  */
1265                 ASSERT(0);
1266         }
1267         *curp = cur;
1268         if (delta) {
1269                 temp2 += temp;
1270                 if (delta->xed_startoff > temp)
1271                         delta->xed_startoff = temp;
1272                 if (delta->xed_blockcount < temp2)
1273                         delta->xed_blockcount = temp2;
1274         }
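        /*
         * Annotation (not part of the original source): each case above
         * left temp holding the starting file offset and temp2 the length
         * of the range whose in-core extents changed.  After the
         * "temp2 += temp" just above, temp2 is an end offset, so the
         * delta's start is lowered and xed_blockcount (used here as an end
         * offset) is raised until the recorded range covers the change.
         */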
1275 done:
1276         *logflagsp = rval;
1277         return error;
1278 #undef  LEFT
1279 #undef  RIGHT
1280 #undef  PREV
1281 }
1282
1283 /*
1284  * Called by xfs_bmap_add_extent to handle cases converting an unwritten
1285  * allocation to a real allocation or vice versa.
1286  */
1287 STATIC int                              /* error */
1288 xfs_bmap_add_extent_unwritten_real(
1289         xfs_inode_t             *ip,    /* incore inode pointer */
1290         xfs_extnum_t            idx,    /* extent number to update/insert */
1291         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
1292         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
1293         int                     *logflagsp, /* inode logging flags */
1294         xfs_extdelta_t          *delta) /* Change made to incore extents */
1295 {
1296         xfs_btree_cur_t         *cur;   /* btree cursor */
1297         xfs_bmbt_rec_host_t     *ep;    /* extent entry for idx */
1298         int                     error;  /* error return value */
1299         int                     i;      /* temp state */
1300         xfs_ifork_t             *ifp;   /* inode fork pointer */
1301         xfs_fileoff_t           new_endoff;     /* end offset of new entry */
1302         xfs_exntst_t            newext; /* new extent state */
1303         xfs_exntst_t            oldext; /* old extent state */
1304         xfs_bmbt_irec_t         r[3];   /* neighbor extent entries */
1305                                         /* left is 0, right is 1, prev is 2 */
1306         int                     rval=0; /* return value (logging flags) */
1307         int                     state = 0;/* state bits, accessed thru macros */
1308         xfs_filblks_t           temp=0;
1309         xfs_filblks_t           temp2=0;
1310
1311 #define LEFT            r[0]
1312 #define RIGHT           r[1]
1313 #define PREV            r[2]
1314         /*
1315          * Set up a bunch of variables to make the tests simpler.
1316          */
1317         error = 0;
1318         cur = *curp;
1319         ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1320         ep = xfs_iext_get_ext(ifp, idx);
1321         xfs_bmbt_get_all(ep, &PREV);
1322         newext = new->br_state;
1323         oldext = (newext == XFS_EXT_UNWRITTEN) ?
1324                 XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
1325         ASSERT(PREV.br_state == oldext);
1326         new_endoff = new->br_startoff + new->br_blockcount;
1327         ASSERT(PREV.br_startoff <= new->br_startoff);
1328         ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
1329
1330         /*
1331          * Set flags determining what part of the previous oldext allocation
1332          * extent is being replaced by a newext allocation.
1333          */
1334         if (PREV.br_startoff == new->br_startoff)
1335                 state |= BMAP_LEFT_FILLING;
1336         if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
1337                 state |= BMAP_RIGHT_FILLING;
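        /*
         * Annotation (illustrative, not part of the original source): if
         * PREV covers file offsets [100, 150), a new extent of [100, 120)
         * sets only BMAP_LEFT_FILLING, [130, 150) sets only
         * BMAP_RIGHT_FILLING, [100, 150) sets both (the whole extent is
         * converted), and [110, 140) sets neither and falls through to the
         * "case 0" middle split below.
         */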
1338
1339         /*
1340          * Check and set flags if this segment has a left neighbor.
1341          * Don't set contiguous if the combined extent would be too large.
1342          */
1343         if (idx > 0) {
1344                 state |= BMAP_LEFT_VALID;
1345                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT);
1346
1347                 if (isnullstartblock(LEFT.br_startblock))
1348                         state |= BMAP_LEFT_DELAY;
1349         }
1350
1351         if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1352             LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1353             LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1354             LEFT.br_state == newext &&
1355             LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1356                 state |= BMAP_LEFT_CONTIG;
1357
1358         /*
1359          * Check and set flags if this segment has a right neighbor.
1360          * Don't set contiguous if the combined extent would be too large.
1361          * Also check for all-three-contiguous being too large.
1362          */
1363         if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
1364                 state |= BMAP_RIGHT_VALID;
1365                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT);
1366                 if (isnullstartblock(RIGHT.br_startblock))
1367                         state |= BMAP_RIGHT_DELAY;
1368         }
1369
1370         if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1371             new_endoff == RIGHT.br_startoff &&
1372             new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1373             newext == RIGHT.br_state &&
1374             new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
1375             ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1376                        BMAP_RIGHT_FILLING)) !=
1377                       (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1378                        BMAP_RIGHT_FILLING) ||
1379              LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1380                         <= MAXEXTLEN))
1381                 state |= BMAP_RIGHT_CONTIG;
1382
1383         /*
1384          * Switch out based on the FILLING and CONTIG state bits.
1385          */
1386         switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1387                          BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
1388         case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1389              BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1390                 /*
1391                  * Setting all of a previous oldext extent to newext.
1392                  * The left and right neighbors are both contiguous with new.
1393                  */
1394                 XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC|RC", ip, idx - 1,
1395                         XFS_DATA_FORK);
1396                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1397                         LEFT.br_blockcount + PREV.br_blockcount +
1398                         RIGHT.br_blockcount);
1399                 XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC|RC", ip, idx - 1,
1400                         XFS_DATA_FORK);
1401                 XFS_BMAP_TRACE_DELETE("LF|RF|LC|RC", ip, idx, 2, XFS_DATA_FORK);
1402                 xfs_iext_remove(ip, idx, 2, state);
1403                 ip->i_df.if_lastex = idx - 1;
1404                 ip->i_d.di_nextents -= 2;
1405                 if (cur == NULL)
1406                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1407                 else {
1408                         rval = XFS_ILOG_CORE;
1409                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
1410                                         RIGHT.br_startblock,
1411                                         RIGHT.br_blockcount, &i)))
1412                                 goto done;
1413                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1414                         if ((error = xfs_btree_delete(cur, &i)))
1415                                 goto done;
1416                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1417                         if ((error = xfs_btree_decrement(cur, 0, &i)))
1418                                 goto done;
1419                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1420                         if ((error = xfs_btree_delete(cur, &i)))
1421                                 goto done;
1422                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1423                         if ((error = xfs_btree_decrement(cur, 0, &i)))
1424                                 goto done;
1425                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1426                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
1427                                 LEFT.br_startblock,
1428                                 LEFT.br_blockcount + PREV.br_blockcount +
1429                                 RIGHT.br_blockcount, LEFT.br_state)))
1430                                 goto done;
1431                 }
1432                 /* DELTA: Three in-core extents are replaced by one. */
1433                 temp = LEFT.br_startoff;
1434                 temp2 = LEFT.br_blockcount +
1435                         PREV.br_blockcount +
1436                         RIGHT.br_blockcount;
1437                 break;
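                /*
                 * Annotation (not part of the original source): the cursor
                 * walk above mirrors the in-core merge in the on-disk
                 * btree: RIGHT is looked up and deleted, the cursor steps
                 * back to PREV which is deleted as well, then steps back to
                 * LEFT, which is rewritten with the combined block count.
                 */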
1438
1439         case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1440                 /*
1441                  * Setting all of a previous oldext extent to newext.
1442                  * The left neighbor is contiguous, the right is not.
1443                  */
1444                 XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC", ip, idx - 1,
1445                         XFS_DATA_FORK);
1446                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1447                         LEFT.br_blockcount + PREV.br_blockcount);
1448                 XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC", ip, idx - 1,
1449                         XFS_DATA_FORK);
1450                 ip->i_df.if_lastex = idx - 1;
1451                 XFS_BMAP_TRACE_DELETE("LF|RF|LC", ip, idx, 1, XFS_DATA_FORK);
1452                 xfs_iext_remove(ip, idx, 1, state);
1453                 ip->i_d.di_nextents--;
1454                 if (cur == NULL)
1455                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1456                 else {
1457                         rval = XFS_ILOG_CORE;
1458                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1459                                         PREV.br_startblock, PREV.br_blockcount,
1460                                         &i)))
1461                                 goto done;
1462                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1463                         if ((error = xfs_btree_delete(cur, &i)))
1464                                 goto done;
1465                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1466                         if ((error = xfs_btree_decrement(cur, 0, &i)))
1467                                 goto done;
1468                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1469                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
1470                                 LEFT.br_startblock,
1471                                 LEFT.br_blockcount + PREV.br_blockcount,
1472                                 LEFT.br_state)))
1473                                 goto done;
1474                 }
1475                 /* DELTA: Two in-core extents are replaced by one. */
1476                 temp = LEFT.br_startoff;
1477                 temp2 = LEFT.br_blockcount +
1478                         PREV.br_blockcount;
1479                 break;
1480
1481         case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1482                 /*
1483                  * Setting all of a previous oldext extent to newext.
1484                  * The right neighbor is contiguous, the left is not.
1485                  */
1486                 XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|RC", ip, idx,
1487                         XFS_DATA_FORK);
1488                 xfs_bmbt_set_blockcount(ep,
1489                         PREV.br_blockcount + RIGHT.br_blockcount);
1490                 xfs_bmbt_set_state(ep, newext);
1491                 XFS_BMAP_TRACE_POST_UPDATE("LF|RF|RC", ip, idx,
1492                         XFS_DATA_FORK);
1493                 ip->i_df.if_lastex = idx;
1494                 XFS_BMAP_TRACE_DELETE("LF|RF|RC", ip, idx + 1, 1, XFS_DATA_FORK);
1495                 xfs_iext_remove(ip, idx + 1, 1, state);
1496                 ip->i_d.di_nextents--;
1497                 if (cur == NULL)
1498                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1499                 else {
1500                         rval = XFS_ILOG_CORE;
1501                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
1502                                         RIGHT.br_startblock,
1503                                         RIGHT.br_blockcount, &i)))
1504                                 goto done;
1505                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1506                         if ((error = xfs_btree_delete(cur, &i)))
1507                                 goto done;
1508                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1509                         if ((error = xfs_btree_decrement(cur, 0, &i)))
1510                                 goto done;
1511                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1512                         if ((error = xfs_bmbt_update(cur, new->br_startoff,
1513                                 new->br_startblock,
1514                                 new->br_blockcount + RIGHT.br_blockcount,
1515                                 newext)))
1516                                 goto done;
1517                 }
1518                 /* DELTA: Two in-core extents are replaced by one. */
1519                 temp = PREV.br_startoff;
1520                 temp2 = PREV.br_blockcount +
1521                         RIGHT.br_blockcount;
1522                 break;
1523
1524         case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1525                 /*
1526                  * Setting all of a previous oldext extent to newext.
1527                  * Neither the left nor right neighbors are contiguous with
1528                  * the new one.
1529                  */
1530                 XFS_BMAP_TRACE_PRE_UPDATE("LF|RF", ip, idx,
1531                         XFS_DATA_FORK);
1532                 xfs_bmbt_set_state(ep, newext);
1533                 XFS_BMAP_TRACE_POST_UPDATE("LF|RF", ip, idx,
1534                         XFS_DATA_FORK);
1535                 ip->i_df.if_lastex = idx;
1536                 if (cur == NULL)
1537                         rval = XFS_ILOG_DEXT;
1538                 else {
1539                         rval = 0;
1540                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1541                                         new->br_startblock, new->br_blockcount,
1542                                         &i)))
1543                                 goto done;
1544                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1545                         if ((error = xfs_bmbt_update(cur, new->br_startoff,
1546                                 new->br_startblock, new->br_blockcount,
1547                                 newext)))
1548                                 goto done;
1549                 }
1550                 /* DELTA: The in-core extent described by new changed type. */
1551                 temp = new->br_startoff;
1552                 temp2 = new->br_blockcount;
1553                 break;
1554
1555         case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1556                 /*
1557                  * Setting the first part of a previous oldext extent to newext.
1558                  * The left neighbor is contiguous.
1559                  */
1560                 XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx - 1,
1561                         XFS_DATA_FORK);
1562                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1563                         LEFT.br_blockcount + new->br_blockcount);
1564                 xfs_bmbt_set_startoff(ep,
1565                         PREV.br_startoff + new->br_blockcount);
1566                 XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx - 1,
1567                         XFS_DATA_FORK);
1568                 XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx,
1569                         XFS_DATA_FORK);
1570                 xfs_bmbt_set_startblock(ep,
1571                         new->br_startblock + new->br_blockcount);
1572                 xfs_bmbt_set_blockcount(ep,
1573                         PREV.br_blockcount - new->br_blockcount);
1574                 XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx,
1575                         XFS_DATA_FORK);
1576                 ip->i_df.if_lastex = idx - 1;
1577                 if (cur == NULL)
1578                         rval = XFS_ILOG_DEXT;
1579                 else {
1580                         rval = 0;
1581                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1582                                         PREV.br_startblock, PREV.br_blockcount,
1583                                         &i)))
1584                                 goto done;
1585                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1586                         if ((error = xfs_bmbt_update(cur,
1587                                 PREV.br_startoff + new->br_blockcount,
1588                                 PREV.br_startblock + new->br_blockcount,
1589                                 PREV.br_blockcount - new->br_blockcount,
1590                                 oldext)))
1591                                 goto done;
1592                         if ((error = xfs_btree_decrement(cur, 0, &i)))
1593                                 goto done;
1594                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
1595                                 LEFT.br_startblock,
1596                                 LEFT.br_blockcount + new->br_blockcount,
1597                                 LEFT.br_state)))
1598                                 goto done;
1599                 }
1600                 /* DELTA: The boundary between two in-core extents moved. */
1601                 temp = LEFT.br_startoff;
1602                 temp2 = LEFT.br_blockcount +
1603                         PREV.br_blockcount;
1604                 break;
1605
1606         case BMAP_LEFT_FILLING:
1607                 /*
1608                  * Setting the first part of a previous oldext extent to newext.
1609                  * The left neighbor is not contiguous.
1610                  */
1611                 XFS_BMAP_TRACE_PRE_UPDATE("LF", ip, idx, XFS_DATA_FORK);
1612                 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
1613                 xfs_bmbt_set_startoff(ep, new_endoff);
1614                 xfs_bmbt_set_blockcount(ep,
1615                         PREV.br_blockcount - new->br_blockcount);
1616                 xfs_bmbt_set_startblock(ep,
1617                         new->br_startblock + new->br_blockcount);
1618                 XFS_BMAP_TRACE_POST_UPDATE("LF", ip, idx, XFS_DATA_FORK);
1619                 XFS_BMAP_TRACE_INSERT("LF", ip, idx, 1, new, NULL,
1620                         XFS_DATA_FORK);
1621                 xfs_iext_insert(ip, idx, 1, new, state);
1622                 ip->i_df.if_lastex = idx;
1623                 ip->i_d.di_nextents++;
1624                 if (cur == NULL)
1625                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1626                 else {
1627                         rval = XFS_ILOG_CORE;
1628                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1629                                         PREV.br_startblock, PREV.br_blockcount,
1630                                         &i)))
1631                                 goto done;
1632                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1633                         if ((error = xfs_bmbt_update(cur,
1634                                 PREV.br_startoff + new->br_blockcount,
1635                                 PREV.br_startblock + new->br_blockcount,
1636                                 PREV.br_blockcount - new->br_blockcount,
1637                                 oldext)))
1638                                 goto done;
1639                         cur->bc_rec.b = *new;
1640                         if ((error = xfs_btree_insert(cur, &i)))
1641                                 goto done;
1642                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1643                 }
1644                 /* DELTA: One in-core extent is split in two. */
1645                 temp = PREV.br_startoff;
1646                 temp2 = PREV.br_blockcount;
1647                 break;
1648
1649         case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1650                 /*
1651                  * Setting the last part of a previous oldext extent to newext.
1652                  * The right neighbor is contiguous with the new allocation.
1653                  */
1654                 XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx,
1655                         XFS_DATA_FORK);
1656                 XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx + 1,
1657                         XFS_DATA_FORK);
1658                 xfs_bmbt_set_blockcount(ep,
1659                         PREV.br_blockcount - new->br_blockcount);
1660                 XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx,
1661                         XFS_DATA_FORK);
1662                 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
1663                         new->br_startoff, new->br_startblock,
1664                         new->br_blockcount + RIGHT.br_blockcount, newext);
1665                 XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx + 1,
1666                         XFS_DATA_FORK);
1667                 ip->i_df.if_lastex = idx + 1;
1668                 if (cur == NULL)
1669                         rval = XFS_ILOG_DEXT;
1670                 else {
1671                         rval = 0;
1672                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1673                                         PREV.br_startblock,
1674                                         PREV.br_blockcount, &i)))
1675                                 goto done;
1676                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1677                         if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
1678                                 PREV.br_startblock,
1679                                 PREV.br_blockcount - new->br_blockcount,
1680                                 oldext)))
1681                                 goto done;
1682                         if ((error = xfs_btree_increment(cur, 0, &i)))
1683                                 goto done;
1684                         if ((error = xfs_bmbt_update(cur, new->br_startoff,
1685                                 new->br_startblock,
1686                                 new->br_blockcount + RIGHT.br_blockcount,
1687                                 newext)))
1688                                 goto done;
1689                 }
1690                 /* DELTA: The boundary between two in-core extents moved. */
1691                 temp = PREV.br_startoff;
1692                 temp2 = PREV.br_blockcount +
1693                         RIGHT.br_blockcount;
1694                 break;
1695
1696         case BMAP_RIGHT_FILLING:
1697                 /*
1698                  * Setting the last part of a previous oldext extent to newext.
1699                  * The right neighbor is not contiguous.
1700                  */
1701                 XFS_BMAP_TRACE_PRE_UPDATE("RF", ip, idx, XFS_DATA_FORK);
1702                 xfs_bmbt_set_blockcount(ep,
1703                         PREV.br_blockcount - new->br_blockcount);
1704                 XFS_BMAP_TRACE_POST_UPDATE("RF", ip, idx, XFS_DATA_FORK);
1705                 XFS_BMAP_TRACE_INSERT("RF", ip, idx + 1, 1, new, NULL,
1706                         XFS_DATA_FORK);
1707                 xfs_iext_insert(ip, idx + 1, 1, new, state);
1708                 ip->i_df.if_lastex = idx + 1;
1709                 ip->i_d.di_nextents++;
1710                 if (cur == NULL)
1711                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1712                 else {
1713                         rval = XFS_ILOG_CORE;
1714                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1715                                         PREV.br_startblock, PREV.br_blockcount,
1716                                         &i)))
1717                                 goto done;
1718                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1719                         if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
1720                                 PREV.br_startblock,
1721                                 PREV.br_blockcount - new->br_blockcount,
1722                                 oldext)))
1723                                 goto done;
1724                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1725                                         new->br_startblock, new->br_blockcount,
1726                                         &i)))
1727                                 goto done;
1728                         XFS_WANT_CORRUPTED_GOTO(i == 0, done);
1729                         cur->bc_rec.b.br_state = XFS_EXT_NORM;
1730                         if ((error = xfs_btree_insert(cur, &i)))
1731                                 goto done;
1732                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1733                 }
1734                 /* DELTA: One in-core extent is split in two. */
1735                 temp = PREV.br_startoff;
1736                 temp2 = PREV.br_blockcount;
1737                 break;
1738
1739         case 0:
1740                 /*
1741                  * Setting the middle part of a previous oldext extent to
1742                  * newext.  Contiguity is impossible here.
1743                  * One extent becomes three extents.
1744                  */
1745                 XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, XFS_DATA_FORK);
1746                 xfs_bmbt_set_blockcount(ep,
1747                         new->br_startoff - PREV.br_startoff);
1748                 XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, XFS_DATA_FORK);
1749                 r[0] = *new;
1750                 r[1].br_startoff = new_endoff;
1751                 r[1].br_blockcount =
1752                         PREV.br_startoff + PREV.br_blockcount - new_endoff;
1753                 r[1].br_startblock = new->br_startblock + new->br_blockcount;
1754                 r[1].br_state = oldext;
1755                 XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 2, &r[0], &r[1],
1756                         XFS_DATA_FORK);
1757                 xfs_iext_insert(ip, idx + 1, 2, &r[0], state);
1758                 ip->i_df.if_lastex = idx + 1;
1759                 ip->i_d.di_nextents += 2;
1760                 if (cur == NULL)
1761                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1762                 else {
1763                         rval = XFS_ILOG_CORE;
1764                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1765                                         PREV.br_startblock, PREV.br_blockcount,
1766                                         &i)))
1767                                 goto done;
1768                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1769                         /* new right extent - oldext */
1770                         if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
1771                                 r[1].br_startblock, r[1].br_blockcount,
1772                                 r[1].br_state)))
1773                                 goto done;
1774                         /* new left extent - oldext */
1775                         cur->bc_rec.b = PREV;
1776                         cur->bc_rec.b.br_blockcount =
1777                                 new->br_startoff - PREV.br_startoff;
1778                         if ((error = xfs_btree_insert(cur, &i)))
1779                                 goto done;
1780                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1781                         /*
1782                          * Reset the cursor to the position of the new extent
1783                          * we are about to insert as we can't trust it after
1784                          * the previous insert.
1785                          */
1786                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1787                                         new->br_startblock, new->br_blockcount,
1788                                         &i)))
1789                                 goto done;
1790                         XFS_WANT_CORRUPTED_GOTO(i == 0, done);
1791                         /* new middle extent - newext */
1792                         cur->bc_rec.b.br_state = new->br_state;
1793                         if ((error = xfs_btree_insert(cur, &i)))
1794                                 goto done;
1795                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1796                 }
1797                 /* DELTA: One in-core extent is split in three. */
1798                 temp = PREV.br_startoff;
1799                 temp2 = PREV.br_blockcount;
1800                 break;
1801
1802         case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1803         case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1804         case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1805         case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1806         case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1807         case BMAP_LEFT_CONTIG:
1808         case BMAP_RIGHT_CONTIG:
1809                 /*
1810                  * These cases are all impossible.
1811                  */
1812                 ASSERT(0);
1813         }
1814         *curp = cur;
1815         if (delta) {
1816                 temp2 += temp;
1817                 if (delta->xed_startoff > temp)
1818                         delta->xed_startoff = temp;
1819                 if (delta->xed_blockcount < temp2)
1820                         delta->xed_blockcount = temp2;
1821         }
1822 done:
1823         *logflagsp = rval;
1824         return error;
1825 #undef  LEFT
1826 #undef  RIGHT
1827 #undef  PREV
1828 }
1829
1830 /*
1831  * Called by xfs_bmap_add_extent to handle cases converting a hole
1832  * to a delayed allocation.
1833  */
1834 /*ARGSUSED*/
1835 STATIC int                              /* error */
1836 xfs_bmap_add_extent_hole_delay(
1837         xfs_inode_t             *ip,    /* incore inode pointer */
1838         xfs_extnum_t            idx,    /* extent number to update/insert */
1839         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
1840         int                     *logflagsp, /* inode logging flags */
1841         xfs_extdelta_t          *delta, /* Change made to incore extents */
1842         int                     rsvd)           /* OK to allocate reserved blocks */
1843 {
1844         xfs_bmbt_rec_host_t     *ep;    /* extent record for idx */
1845         xfs_ifork_t             *ifp;   /* inode fork pointer */
1846         xfs_bmbt_irec_t         left;   /* left neighbor extent entry */
1847         xfs_filblks_t           newlen=0;       /* new indirect size */
1848         xfs_filblks_t           oldlen=0;       /* old indirect size */
1849         xfs_bmbt_irec_t         right;  /* right neighbor extent entry */
1850         int                     state;  /* state bits, accessed thru macros */
1851         xfs_filblks_t           temp=0; /* temp for indirect calculations */
1852         xfs_filblks_t           temp2=0;
1853
1854         ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1855         ep = xfs_iext_get_ext(ifp, idx);
1856         state = 0;
1857         ASSERT(isnullstartblock(new->br_startblock));
1858
1859         /*
1860          * Check and set flags if this segment has a left neighbor
1861          */
1862         if (idx > 0) {
1863                 state |= BMAP_LEFT_VALID;
1864                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left);
1865
1866                 if (isnullstartblock(left.br_startblock))
1867                         state |= BMAP_LEFT_DELAY;
1868         }
1869
1870         /*
1871          * Check and set flags if the current (right) segment exists.
1872          * If it doesn't exist, we're converting the hole at end-of-file.
1873          */
1874         if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
1875                 state |= BMAP_RIGHT_VALID;
1876                 xfs_bmbt_get_all(ep, &right);
1877
1878                 if (isnullstartblock(right.br_startblock))
1879                         state |= BMAP_RIGHT_DELAY;
1880         }
1881
1882         /*
1883          * Set contiguity flags on the left and right neighbors.
1884          * Don't let extents get too large, even if the pieces are contiguous.
1885          */
1886         if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
1887             left.br_startoff + left.br_blockcount == new->br_startoff &&
1888             left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1889                 state |= BMAP_LEFT_CONTIG;
1890
1891         if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
1892             new->br_startoff + new->br_blockcount == right.br_startoff &&
1893             new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
1894             (!(state & BMAP_LEFT_CONTIG) ||
1895              (left.br_blockcount + new->br_blockcount +
1896               right.br_blockcount <= MAXEXTLEN)))
1897                 state |= BMAP_RIGHT_CONTIG;
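        /*
         * Annotation (not part of the original source): unlike the real
         * allocation cases, contiguity here only requires the file offsets
         * to line up and BMAP_*_DELAY to be set; delayed extents carry no
         * real start block, so there is no block-number adjacency to test.
         */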
1898
1899         /*
1900          * Switch out based on the contiguity flags.
1901          */
1902         switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
1903         case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1904                 /*
1905                  * New allocation is contiguous with delayed allocations
1906                  * on the left and on the right.
1907                  * Merge all three into a single extent record.
1908                  */
1909                 temp = left.br_blockcount + new->br_blockcount +
1910                         right.br_blockcount;
1911                 XFS_BMAP_TRACE_PRE_UPDATE("LC|RC", ip, idx - 1,
1912                         XFS_DATA_FORK);
1913                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
1914                 oldlen = startblockval(left.br_startblock) +
1915                         startblockval(new->br_startblock) +
1916                         startblockval(right.br_startblock);
1917                 newlen = xfs_bmap_worst_indlen(ip, temp);
1918                 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
1919                         nullstartblock((int)newlen));
1920                 XFS_BMAP_TRACE_POST_UPDATE("LC|RC", ip, idx - 1,
1921                         XFS_DATA_FORK);
1922                 XFS_BMAP_TRACE_DELETE("LC|RC", ip, idx, 1, XFS_DATA_FORK);
1923                 xfs_iext_remove(ip, idx, 1, state);
1924                 ip->i_df.if_lastex = idx - 1;
1925                 /* DELTA: Two in-core extents were replaced by one. */
1926                 temp2 = temp;
1927                 temp = left.br_startoff;
1928                 break;
1929
1930         case BMAP_LEFT_CONTIG:
1931                 /*
1932                  * New allocation is contiguous with a delayed allocation
1933                  * on the left.
1934                  * Merge the new allocation with the left neighbor.
1935                  */
1936                 temp = left.br_blockcount + new->br_blockcount;
1937                 XFS_BMAP_TRACE_PRE_UPDATE("LC", ip, idx - 1,
1938                         XFS_DATA_FORK);
1939                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
1940                 oldlen = startblockval(left.br_startblock) +
1941                         startblockval(new->br_startblock);
1942                 newlen = xfs_bmap_worst_indlen(ip, temp);
1943                 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
1944                         nullstartblock((int)newlen));
1945                 XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1,
1946                         XFS_DATA_FORK);
1947                 ip->i_df.if_lastex = idx - 1;
1948                 /* DELTA: One in-core extent grew into a hole. */
1949                 temp2 = temp;
1950                 temp = left.br_startoff;
1951                 break;
1952
1953         case BMAP_RIGHT_CONTIG:
1954                 /*
1955                  * New allocation is contiguous with a delayed allocation
1956                  * on the right.
1957                  * Merge the new allocation with the right neighbor.
1958                  */
1959                 XFS_BMAP_TRACE_PRE_UPDATE("RC", ip, idx, XFS_DATA_FORK);
1960                 temp = new->br_blockcount + right.br_blockcount;
1961                 oldlen = startblockval(new->br_startblock) +
1962                         startblockval(right.br_startblock);
1963                 newlen = xfs_bmap_worst_indlen(ip, temp);
1964                 xfs_bmbt_set_allf(ep, new->br_startoff,
1965                         nullstartblock((int)newlen), temp, right.br_state);
1966                 XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, XFS_DATA_FORK);
1967                 ip->i_df.if_lastex = idx;
1968                 /* DELTA: One in-core extent grew into a hole. */
1969                 temp2 = temp;
1970                 temp = new->br_startoff;
1971                 break;
1972
1973         case 0:
1974                 /*
1975                  * New allocation is not contiguous with another
1976                  * delayed allocation.
1977                  * Insert a new entry.
1978                  */
1979                 oldlen = newlen = 0;
1980                 XFS_BMAP_TRACE_INSERT("0", ip, idx, 1, new, NULL,
1981                         XFS_DATA_FORK);
1982                 xfs_iext_insert(ip, idx, 1, new, state);
1983                 ip->i_df.if_lastex = idx;
1984                 /* DELTA: A new in-core extent was added in a hole. */
1985                 temp2 = new->br_blockcount;
1986                 temp = new->br_startoff;
1987                 break;
1988         }
1989         if (oldlen != newlen) {
1990                 ASSERT(oldlen > newlen);
1991                 xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS,
1992                         (int64_t)(oldlen - newlen), rsvd);
1993                 /*
1994                  * Nothing to do for disk quota accounting here.
1995                  */
1996         }
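        /*
         * Annotation (not part of the original source): merging delayed
         * extents is expected only to shrink the combined worst-case
         * indirect reservation, hence the ASSERT above; the difference
         * (oldlen - newlen) is returned to the in-core free block counter
         * because it no longer needs to stay reserved.
         */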
1997         if (delta) {
1998                 temp2 += temp;
1999                 if (delta->xed_startoff > temp)
2000                         delta->xed_startoff = temp;
2001                 if (delta->xed_blockcount < temp2)
2002                         delta->xed_blockcount = temp2;
2003         }
2004         *logflagsp = 0;
2005         return 0;
2006 }
2007
2008 /*
2009  * Called by xfs_bmap_add_extent to handle cases converting a hole
2010  * to a real allocation.
2011  */
2012 STATIC int                              /* error */
2013 xfs_bmap_add_extent_hole_real(
2014         xfs_inode_t             *ip,    /* incore inode pointer */
2015         xfs_extnum_t            idx,    /* extent number to update/insert */
2016         xfs_btree_cur_t         *cur,   /* if null, not a btree */
2017         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
2018         int                     *logflagsp, /* inode logging flags */
2019         xfs_extdelta_t          *delta, /* Change made to incore extents */
2020         int                     whichfork) /* data or attr fork */
2021 {
2022         xfs_bmbt_rec_host_t     *ep;    /* pointer to extent entry ins. point */
2023         int                     error;  /* error return value */
2024         int                     i;      /* temp state */
2025         xfs_ifork_t             *ifp;   /* inode fork pointer */
2026         xfs_bmbt_irec_t         left;   /* left neighbor extent entry */
2027         xfs_bmbt_irec_t         right;  /* right neighbor extent entry */
2028         int                     rval=0; /* return value (logging flags) */
2029         int                     state;  /* state bits, accessed thru macros */
2030         xfs_filblks_t           temp=0;
2031         xfs_filblks_t           temp2=0;
2032
2033         ifp = XFS_IFORK_PTR(ip, whichfork);
2034         ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
2035         ep = xfs_iext_get_ext(ifp, idx);
2036         state = 0;
2037
2038         if (whichfork == XFS_ATTR_FORK)
2039                 state |= BMAP_ATTRFORK;
2040
2041         /*
2042          * Check and set flags if this segment has a left neighbor.
2043          */
2044         if (idx > 0) {
2045                 state |= BMAP_LEFT_VALID;
2046                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left);
2047                 if (isnullstartblock(left.br_startblock))
2048                         state |= BMAP_LEFT_DELAY;
2049         }
2050
2051         /*
2052          * Check and set flags if this segment has a current value.
2053          * Not true if we're inserting into the "hole" at eof.
2054          */
2055         if (idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
2056                 state |= BMAP_RIGHT_VALID;
2057                 xfs_bmbt_get_all(ep, &right);
2058                 if (isnullstartblock(right.br_startblock))
2059                         state |= BMAP_RIGHT_DELAY;
2060         }
2061
2062         /*
2063          * We're inserting a real allocation between "left" and "right".
2064          * Set the contiguity flags.  Don't let extents get too large.
2065          */
2066         if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2067             left.br_startoff + left.br_blockcount == new->br_startoff &&
2068             left.br_startblock + left.br_blockcount == new->br_startblock &&
2069             left.br_state == new->br_state &&
2070             left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2071                 state |= BMAP_LEFT_CONTIG;
2072
2073         if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2074             new->br_startoff + new->br_blockcount == right.br_startoff &&
2075             new->br_startblock + new->br_blockcount == right.br_startblock &&
2076             new->br_state == right.br_state &&
2077             new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2078             (!(state & BMAP_LEFT_CONTIG) ||
2079              left.br_blockcount + new->br_blockcount +
2080              right.br_blockcount <= MAXEXTLEN))
2081                 state |= BMAP_RIGHT_CONTIG;
2082
2083         error = 0;
2084         /*
2085          * Select which case we're in here, and implement it.
2086          */
2087         switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2088         case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2089                 /*
2090                  * New allocation is contiguous with real allocations on the
2091                  * left and on the right.
2092                  * Merge all three into a single extent record.
2093                  */
2094                 XFS_BMAP_TRACE_PRE_UPDATE("LC|RC", ip, idx - 1,
2095                         whichfork);
2096                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
2097                         left.br_blockcount + new->br_blockcount +
2098                         right.br_blockcount);
2099                 XFS_BMAP_TRACE_POST_UPDATE("LC|RC", ip, idx - 1,
2100                         whichfork);
2101                 XFS_BMAP_TRACE_DELETE("LC|RC", ip, idx, 1, whichfork);
2102                 xfs_iext_remove(ip, idx, 1, state);
2103                 ifp->if_lastex = idx - 1;
2104                 XFS_IFORK_NEXT_SET(ip, whichfork,
2105                         XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2106                 if (cur == NULL) {
2107                         rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2108                 } else {
2109                         rval = XFS_ILOG_CORE;
2110                         if ((error = xfs_bmbt_lookup_eq(cur,
2111                                         right.br_startoff,
2112                                         right.br_startblock,
2113                                         right.br_blockcount, &i)))
2114                                 goto done;
2115                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2116                         if ((error = xfs_btree_delete(cur, &i)))
2117                                 goto done;
2118                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2119                         if ((error = xfs_btree_decrement(cur, 0, &i)))
2120                                 goto done;
2121                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2122                         if ((error = xfs_bmbt_update(cur, left.br_startoff,
2123                                         left.br_startblock,
2124                                         left.br_blockcount +
2125                                                 new->br_blockcount +
2126                                                 right.br_blockcount,
2127                                         left.br_state)))
2128                                 goto done;
2129                 }
2130                 /* DELTA: Two in-core extents were replaced by one. */
2131                 temp = left.br_startoff;
2132                 temp2 = left.br_blockcount +
2133                         new->br_blockcount +
2134                         right.br_blockcount;
2135                 break;
2136
2137         case BMAP_LEFT_CONTIG:
2138                 /*
2139                  * New allocation is contiguous with a real allocation
2140                  * on the left.
2141                  * Merge the new allocation with the left neighbor.
2142                  */
2143                 XFS_BMAP_TRACE_PRE_UPDATE("LC", ip, idx - 1, whichfork);
2144                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
2145                         left.br_blockcount + new->br_blockcount);
2146                 XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1, whichfork);
2147                 ifp->if_lastex = idx - 1;
2148                 if (cur == NULL) {
2149                         rval = xfs_ilog_fext(whichfork);
2150                 } else {
2151                         rval = 0;
2152                         if ((error = xfs_bmbt_lookup_eq(cur,
2153                                         left.br_startoff,
2154                                         left.br_startblock,
2155                                         left.br_blockcount, &i)))
2156                                 goto done;
2157                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2158                         if ((error = xfs_bmbt_update(cur, left.br_startoff,
2159                                         left.br_startblock,
2160                                         left.br_blockcount +
2161                                                 new->br_blockcount,
2162                                         left.br_state)))
2163                                 goto done;
2164                 }
2165                 /* DELTA: One in-core extent grew. */
2166                 temp = left.br_startoff;
2167                 temp2 = left.br_blockcount +
2168                         new->br_blockcount;
2169                 break;
2170
2171         case BMAP_RIGHT_CONTIG:
2172                 /*
2173                  * New allocation is contiguous with a real allocation
2174                  * on the right.
2175                  * Merge the new allocation with the right neighbor.
2176                  */
2177                 XFS_BMAP_TRACE_PRE_UPDATE("RC", ip, idx, whichfork);
2178                 xfs_bmbt_set_allf(ep, new->br_startoff, new->br_startblock,
2179                         new->br_blockcount + right.br_blockcount,
2180                         right.br_state);
2181                 XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, whichfork);
2182                 ifp->if_lastex = idx;
2183                 if (cur == NULL) {
2184                         rval = xfs_ilog_fext(whichfork);
2185                 } else {
2186                         rval = 0;
2187                         if ((error = xfs_bmbt_lookup_eq(cur,
2188                                         right.br_startoff,
2189                                         right.br_startblock,
2190                                         right.br_blockcount, &i)))
2191                                 goto done;
2192                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2193                         if ((error = xfs_bmbt_update(cur, new->br_startoff,
2194                                         new->br_startblock,
2195                                         new->br_blockcount +
2196                                                 right.br_blockcount,
2197                                         right.br_state)))
2198                                 goto done;
2199                 }
2200                 /* DELTA: One in-core extent grew. */
2201                 temp = new->br_startoff;
2202                 temp2 = new->br_blockcount +
2203                         right.br_blockcount;
2204                 break;
2205
2206         case 0:
2207                 /*
2208                  * New allocation is not contiguous with another
2209                  * real allocation.
2210                  * Insert a new entry.
2211                  */
2212                 XFS_BMAP_TRACE_INSERT("0", ip, idx, 1, new, NULL, whichfork);
2213                 xfs_iext_insert(ip, idx, 1, new, state);
2214                 ifp->if_lastex = idx;
2215                 XFS_IFORK_NEXT_SET(ip, whichfork,
2216                         XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2217                 if (cur == NULL) {
2218                         rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2219                 } else {
2220                         rval = XFS_ILOG_CORE;
2221                         if ((error = xfs_bmbt_lookup_eq(cur,
2222                                         new->br_startoff,
2223                                         new->br_startblock,
2224                                         new->br_blockcount, &i)))
2225                                 goto done;
2226                         XFS_WANT_CORRUPTED_GOTO(i == 0, done);
2227                         cur->bc_rec.b.br_state = new->br_state;
2228                         if ((error = xfs_btree_insert(cur, &i)))
2229                                 goto done;
2230                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2231                 }
2232                 /* DELTA: A new extent was added in a hole. */
2233                 temp = new->br_startoff;
2234                 temp2 = new->br_blockcount;
2235                 break;
2236         }
2237         if (delta) {
2238                 temp2 += temp;
2239                 if (delta->xed_startoff > temp)
2240                         delta->xed_startoff = temp;
2241                 if (delta->xed_blockcount < temp2)
2242                         delta->xed_blockcount = temp2;
2243         }
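        /*
         * Note: while the delta is being accumulated here, xed_blockcount
         * holds the end offset of the changed region (temp2 was just turned
         * into startoff + blockcount above), i.e. it records the highest
         * end offset touched so far rather than a length.
         */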
2244 done:
2245         *logflagsp = rval;
2246         return error;
2247 }
2248
2249 /*
2250  * Adjust the size of the new extent based on di_extsize and rt extsize.
2251  */
2252 STATIC int
2253 xfs_bmap_extsize_align(
2254         xfs_mount_t     *mp,
2255         xfs_bmbt_irec_t *gotp,          /* next extent pointer */
2256         xfs_bmbt_irec_t *prevp,         /* previous extent pointer */
2257         xfs_extlen_t    extsz,          /* align to this extent size */
2258         int             rt,             /* is this a realtime inode? */
2259         int             eof,            /* is extent at end-of-file? */
2260         int             delay,          /* creating delalloc extent? */
2261         int             convert,        /* overwriting unwritten extent? */
2262         xfs_fileoff_t   *offp,          /* in/out: aligned offset */
2263         xfs_extlen_t    *lenp)          /* in/out: aligned length */
2264 {
2265         xfs_fileoff_t   orig_off;       /* original offset */
2266         xfs_extlen_t    orig_alen;      /* original length */
2267         xfs_fileoff_t   orig_end;       /* original off+len */
2268         xfs_fileoff_t   nexto;          /* next file offset */
2269         xfs_fileoff_t   prevo;          /* previous file offset */
2270         xfs_fileoff_t   align_off;      /* temp for offset */
2271         xfs_extlen_t    align_alen;     /* temp for length */
2272         xfs_extlen_t    temp;           /* temp for calculations */
2273
2274         if (convert)
2275                 return 0;
2276
2277         orig_off = align_off = *offp;
2278         orig_alen = align_alen = *lenp;
2279         orig_end = orig_off + orig_alen;
2280
2281         /*
2282          * If this request overlaps an existing extent, then don't
2283          * attempt to perform any additional alignment.
2284          */
2285         if (!delay && !eof &&
2286             (orig_off >= gotp->br_startoff) &&
2287             (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2288                 return 0;
2289         }
2290
2291         /*
2292          * If the file offset is unaligned vs. the extent size
2293          * we need to align it.  This will be possible unless
2294          * the file was previously written with a kernel that didn't
2295          * perform this alignment, or if a truncate shot us in the
2296          * foot.
2297          */
2298         temp = do_mod(orig_off, extsz);
2299         if (temp) {
2300                 align_alen += temp;
2301                 align_off -= temp;
2302         }
2303         /*
2304          * Same adjustment for the end of the requested area.
2305          */
2306         if ((temp = (align_alen % extsz))) {
2307                 align_alen += extsz - temp;
2308         }
2309         /*
2310          * If the previous block overlaps with this proposed allocation
2311          * then move the start forward without adjusting the length.
2312          */
2313         if (prevp->br_startoff != NULLFILEOFF) {
2314                 if (prevp->br_startblock == HOLESTARTBLOCK)
2315                         prevo = prevp->br_startoff;
2316                 else
2317                         prevo = prevp->br_startoff + prevp->br_blockcount;
2318         } else
2319                 prevo = 0;
2320         if (align_off != orig_off && align_off < prevo)
2321                 align_off = prevo;
2322         /*
2323          * If the next block overlaps with this proposed allocation
2324          * then move the start back without adjusting the length,
2325          * but not before offset 0.
2326          * This may of course make the start overlap the previous block,
2327          * and if we hit the offset 0 limit then the next block
2328          * can still overlap too.
2329          */
2330         if (!eof && gotp->br_startoff != NULLFILEOFF) {
2331                 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
2332                     (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
2333                         nexto = gotp->br_startoff + gotp->br_blockcount;
2334                 else
2335                         nexto = gotp->br_startoff;
2336         } else
2337                 nexto = NULLFILEOFF;
2338         if (!eof &&
2339             align_off + align_alen != orig_end &&
2340             align_off + align_alen > nexto)
2341                 align_off = nexto > align_alen ? nexto - align_alen : 0;
2342         /*
2343          * If we're now overlapping the next or previous extent that
2344          * means we can't fit an extsz piece in this hole.  Just move
2345          * the start forward to the first valid spot and set
2346          * the length so we hit the end.
2347          */
2348         if (align_off != orig_off && align_off < prevo)
2349                 align_off = prevo;
2350         if (align_off + align_alen != orig_end &&
2351             align_off + align_alen > nexto &&
2352             nexto != NULLFILEOFF) {
2353                 ASSERT(nexto > prevo);
2354                 align_alen = nexto - align_off;
2355         }
2356
2357         /*
2358          * If realtime, and the result isn't a multiple of the realtime
2359          * extent size we need to remove blocks until it is.
2360          */
2361         if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
2362                 /*
2363                  * We're not covering the original request, or
2364                  * we won't be able to once we fix the length.
2365                  */
2366                 if (orig_off < align_off ||
2367                     orig_end > align_off + align_alen ||
2368                     align_alen - temp < orig_alen)
2369                         return XFS_ERROR(EINVAL);
2370                 /*
2371                  * Try to fix it by moving the start up.
2372                  */
2373                 if (align_off + temp <= orig_off) {
2374                         align_alen -= temp;
2375                         align_off += temp;
2376                 }
2377                 /*
2378                  * Try to fix it by moving the end in.
2379                  */
2380                 else if (align_off + align_alen - temp >= orig_end)
2381                         align_alen -= temp;
2382                 /*
2383                  * Set the start to the minimum then trim the length.
2384                  */
2385                 else {
2386                         align_alen -= orig_off - align_off;
2387                         align_off = orig_off;
2388                         align_alen -= align_alen % mp->m_sb.sb_rextsize;
2389                 }
2390                 /*
2391                  * Result doesn't cover the request, fail it.
2392                  */
2393                 if (orig_off < align_off || orig_end > align_off + align_alen)
2394                         return XFS_ERROR(EINVAL);
2395         } else {
2396                 ASSERT(orig_off >= align_off);
2397                 ASSERT(orig_end <= align_off + align_alen);
2398         }
2399
2400 #ifdef DEBUG
2401         if (!eof && gotp->br_startoff != NULLFILEOFF)
2402                 ASSERT(align_off + align_alen <= gotp->br_startoff);
2403         if (prevp->br_startoff != NULLFILEOFF)
2404                 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
2405 #endif
2406
2407         *lenp = align_alen;
2408         *offp = align_off;
2409         return 0;
2410 }
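
/*
 * Illustrative sketch (not part of the original file): with the neighbour
 * and realtime special cases stripped away, the heart of the alignment
 * above is simply "round the start down and the length up to extent-size
 * multiples".  The helper below is hypothetical and uses plain types.
 */
#if 0   /* example only */
static void
example_extsz_align(
        unsigned long long      *off,   /* in/out: start, in blocks */
        unsigned long long      *len,   /* in/out: length, in blocks */
        unsigned long long      extsz)  /* extent size hint, in blocks */
{
        unsigned long long      head = *off % extsz;    /* blocks ahead of the aligned start */
        unsigned long long      tail;

        *off -= head;                   /* round the start down... */
        *len += head;                   /* ...but keep covering the original range */
        tail = *len % extsz;
        if (tail)
                *len += extsz - tail;   /* round the length up */
}
#endif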
2411
2412 #define XFS_ALLOC_GAP_UNITS     4
2413
2414 STATIC void
2415 xfs_bmap_adjacent(
2416         xfs_bmalloca_t  *ap)            /* bmap alloc argument struct */
2417 {
2418         xfs_fsblock_t   adjust;         /* adjustment to block numbers */
2419         xfs_agnumber_t  fb_agno;        /* ag number of ap->firstblock */
2420         xfs_mount_t     *mp;            /* mount point structure */
2421         int             nullfb;         /* true if ap->firstblock isn't set */
2422         int             rt;             /* true if inode is realtime */
2423
2424 #define ISVALID(x,y)    \
2425         (rt ? \
2426                 (x) < mp->m_sb.sb_rblocks : \
2427                 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
2428                 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
2429                 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
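/*
 * In words: for a realtime allocation ISVALID() only requires the candidate
 * block (x) to lie inside the realtime device; otherwise x must be in the
 * same AG as the reference block (y), that AG must exist, and x's offset
 * within the AG must be less than the AG size.
 */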
2430
2431         mp = ap->ip->i_mount;
2432         nullfb = ap->firstblock == NULLFSBLOCK;
2433         rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
2434         fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
2435         /*
2436          * If allocating at eof, and there's a previous real block,
2437          * try to use its last block as our starting point.
2438          */
2439         if (ap->eof && ap->prevp->br_startoff != NULLFILEOFF &&
2440             !isnullstartblock(ap->prevp->br_startblock) &&
2441             ISVALID(ap->prevp->br_startblock + ap->prevp->br_blockcount,
2442                     ap->prevp->br_startblock)) {
2443                 ap->rval = ap->prevp->br_startblock + ap->prevp->br_blockcount;
2444                 /*
2445                  * Adjust for the gap between prevp and us.
2446                  */
2447                 adjust = ap->off -
2448                         (ap->prevp->br_startoff + ap->prevp->br_blockcount);
2449                 if (adjust &&
2450                     ISVALID(ap->rval + adjust, ap->prevp->br_startblock))
2451                         ap->rval += adjust;
2452         }
2453         /*
2454          * If not at eof, then compare the two neighbor blocks.
2455          * Figure out whether either one gives us a good starting point,
2456          * and pick the better one.
2457          */
2458         else if (!ap->eof) {
2459                 xfs_fsblock_t   gotbno;         /* right side block number */
2460                 xfs_fsblock_t   gotdiff=0;      /* right side difference */
2461                 xfs_fsblock_t   prevbno;        /* left side block number */
2462                 xfs_fsblock_t   prevdiff=0;     /* left side difference */
2463
2464                 /*
2465                  * If there's a previous (left) block, select a requested
2466                  * start block based on it.
2467                  */
2468                 if (ap->prevp->br_startoff != NULLFILEOFF &&
2469                     !isnullstartblock(ap->prevp->br_startblock) &&
2470                     (prevbno = ap->prevp->br_startblock +
2471                                ap->prevp->br_blockcount) &&
2472                     ISVALID(prevbno, ap->prevp->br_startblock)) {
2473                         /*
2474                          * Calculate gap to end of previous block.
2475                          */
2476                         adjust = prevdiff = ap->off -
2477                                 (ap->prevp->br_startoff +
2478                                  ap->prevp->br_blockcount);
2479                         /*
2480                          * Figure the startblock based on the previous block's
2481                          * end and the gap size.
2482                          * Heuristic!
2483                          * If the gap is large relative to the piece we're
2484                          * allocating, or using it gives us an invalid block
2485                          * number, then just use the end of the previous block.
2486                          */
2487                         if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->alen &&
2488                             ISVALID(prevbno + prevdiff,
2489                                     ap->prevp->br_startblock))
2490                                 prevbno += adjust;
2491                         else
2492                                 prevdiff += adjust;
2493                         /*
2494                          * If the firstblock forbids it, can't use it,
2495                          * must use default.
2496                          */
2497                         if (!rt && !nullfb &&
2498                             XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
2499                                 prevbno = NULLFSBLOCK;
2500                 }
2501                 /*
2502                  * No previous block or can't follow it, just default.
2503                  */
2504                 else
2505                         prevbno = NULLFSBLOCK;
2506                 /*
2507                  * If there's a following (right) block, select a requested
2508                  * start block based on it.
2509                  */
2510                 if (!isnullstartblock(ap->gotp->br_startblock)) {
2511                         /*
2512                          * Calculate gap to start of next block.
2513                          */
2514                         adjust = gotdiff = ap->gotp->br_startoff - ap->off;
2515                         /*
2516                          * Figure the startblock based on the next block's
2517                          * start and the gap size.
2518                          */
2519                         gotbno = ap->gotp->br_startblock;
2520                         /*
2521                          * Heuristic!
2522                          * If the gap is large relative to the piece we're
2523                          * allocating, or using it gives us an invalid block
2524                          * number, then just use the start of the next block
2525                          * offset by our length.
2526                          */
2527                         if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->alen &&
2528                             ISVALID(gotbno - gotdiff, gotbno))
2529                                 gotbno -= adjust;
2530                         else if (ISVALID(gotbno - ap->alen, gotbno)) {
2531                                 gotbno -= ap->alen;
2532                                 gotdiff += adjust - ap->alen;
2533                         } else
2534                                 gotdiff += adjust;
2535                         /*
2536                          * If the firstblock forbids it, can't use it,
2537                          * must use default.
2538                          */
2539                         if (!rt && !nullfb &&
2540                             XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
2541                                 gotbno = NULLFSBLOCK;
2542                 }
2543                 /*
2544                  * No next block, just default.
2545                  */
2546                 else
2547                         gotbno = NULLFSBLOCK;
2548                 /*
2549                  * If both valid, pick the better one, else the only good
2550                  * one, else ap->rval is already set (to 0 or the inode block).
2551                  */
2552                 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
2553                         ap->rval = prevdiff <= gotdiff ? prevbno : gotbno;
2554                 else if (prevbno != NULLFSBLOCK)
2555                         ap->rval = prevbno;
2556                 else if (gotbno != NULLFSBLOCK)
2557                         ap->rval = gotbno;
2558         }
2559 #undef ISVALID
2560 }
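
/*
 * Illustrative sketch (not part of the original file): the heuristic used
 * by xfs_bmap_adjacent() for each neighbour.  If the hole between the
 * neighbour and the request is small (at most XFS_ALLOC_GAP_UNITS times
 * the allocation length), ask for a block that mirrors the file-offset gap
 * in the block address space (added after a left neighbour, subtracted
 * before a right one); otherwise just ask for the block adjacent to the
 * neighbour.  The helper below shows only the left-neighbour case and
 * omits the ISVALID() checks; names are hypothetical.
 */
#if 0   /* example only */
static unsigned long long
example_target_after_left_neighbour(
        unsigned long long      left_end_block, /* block just past the left neighbour */
        unsigned long long      gap_blocks,     /* hole between neighbour and request */
        unsigned long long      alloc_len)      /* length being allocated */
{
        if (gap_blocks <= XFS_ALLOC_GAP_UNITS * alloc_len)
                return left_end_block + gap_blocks;     /* preserve the gap on disk */
        return left_end_block;                          /* gap too big, just go adjacent */
}
#endif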
2561
2562 STATIC int
2563 xfs_bmap_rtalloc(
2564         xfs_bmalloca_t  *ap)            /* bmap alloc argument struct */
2565 {
2566         xfs_alloctype_t atype = 0;      /* type for allocation routines */
2567         int             error;          /* error return value */
2568         xfs_mount_t     *mp;            /* mount point structure */
2569         xfs_extlen_t    prod = 0;       /* product factor for allocators */
2570         xfs_extlen_t    ralen = 0;      /* realtime allocation length */
2571         xfs_extlen_t    align;          /* minimum allocation alignment */
2572         xfs_rtblock_t   rtb;
2573
2574         mp = ap->ip->i_mount;
2575         align = xfs_get_extsz_hint(ap->ip);
2576         prod = align / mp->m_sb.sb_rextsize;
2577         error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
2578                                         align, 1, ap->eof, 0,
2579                                         ap->conv, &ap->off, &ap->alen);
2580         if (error)
2581                 return error;
2582         ASSERT(ap->alen);
2583         ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0);
2584
2585         /*
2586          * If the offset & length are not perfectly aligned
2587          * then kill prod, it will just get us in trouble.
2588          */
2589         if (do_mod(ap->off, align) || ap->alen % align)
2590                 prod = 1;
2591         /*
2592          * Set ralen to be the actual requested length in rtextents.
2593          */
2594         ralen = ap->alen / mp->m_sb.sb_rextsize;
2595         /*
2596          * If the old value was close enough to MAXEXTLEN that
2597          * we rounded up to it, cut it back so it's valid again.
2598          * Note that if the original request was bigger than MAXEXTLEN,
2599          * we are never told that number here, and so can't adjust
2600          * the starting point to match it.
2601          */
2602         if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
2603                 ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
2604         /*
2605          * If it's an allocation to an empty file at offset 0,
2606          * pick an extent that will space things out in the rt area.
2607          */
2608         if (ap->eof && ap->off == 0) {
2609                 xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
2610
2611                 error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
2612                 if (error)
2613                         return error;
2614                 ap->rval = rtx * mp->m_sb.sb_rextsize;
2615         } else {
2616                 ap->rval = 0;
2617         }
2618
2619         xfs_bmap_adjacent(ap);
2620
2621         /*
2622          * Realtime allocation, done through xfs_rtallocate_extent.
2623          */
2624         atype = ap->rval == 0 ?  XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
2625         do_div(ap->rval, mp->m_sb.sb_rextsize);
2626         rtb = ap->rval;
2627         ap->alen = ralen;
2628         if ((error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, ap->alen,
2629                                 &ralen, atype, ap->wasdel, prod, &rtb)))
2630                 return error;
2631         if (rtb == NULLFSBLOCK && prod > 1 &&
2632             (error = xfs_rtallocate_extent(ap->tp, ap->rval, 1,
2633                                            ap->alen, &ralen, atype,
2634                                            ap->wasdel, 1, &rtb)))
2635                 return error;
2636         ap->rval = rtb;
2637         if (ap->rval != NULLFSBLOCK) {
2638                 ap->rval *= mp->m_sb.sb_rextsize;
2639                 ralen *= mp->m_sb.sb_rextsize;
2640                 ap->alen = ralen;
2641                 ap->ip->i_d.di_nblocks += ralen;
2642                 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
2643                 if (ap->wasdel)
2644                         ap->ip->i_delayed_blks -= ralen;
2645                 /*
2646                  * Adjust the disk quota also. This was reserved
2647                  * earlier.
2648                  */
2649                 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
2650                         ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
2651                                         XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
2652         } else {
2653                 ap->alen = 0;
2654         }
2655         return 0;
2656 }
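
/*
 * Unit note for xfs_bmap_rtalloc() above: xfs_rtallocate_extent() works in
 * realtime extents, not filesystem blocks, which is why ap->rval is divided
 * by sb_rextsize before the call and both the returned block and length are
 * multiplied by sb_rextsize again on success.
 */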
2657
2658 STATIC int
2659 xfs_bmap_btalloc(
2660         xfs_bmalloca_t  *ap)            /* bmap alloc argument struct */
2661 {
2662         xfs_mount_t     *mp;            /* mount point structure */
2663         xfs_alloctype_t atype = 0;      /* type for allocation routines */
2664         xfs_extlen_t    align;          /* minimum allocation alignment */
2665         xfs_agnumber_t  ag;
2666         xfs_agnumber_t  fb_agno;        /* ag number of ap->firstblock */
2667         xfs_agnumber_t  startag;
2668         xfs_alloc_arg_t args;
2669         xfs_extlen_t    blen;
2670         xfs_extlen_t    nextminlen = 0;
2671         xfs_perag_t     *pag;
2672         int             nullfb;         /* true if ap->firstblock isn't set */
2673         int             isaligned;
2674         int             notinit;
2675         int             tryagain;
2676         int             error;
2677
2678         mp = ap->ip->i_mount;
2679         align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
2680         if (unlikely(align)) {
2681                 error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
2682                                                 align, 0, ap->eof, 0, ap->conv,
2683                                                 &ap->off, &ap->alen);
2684                 ASSERT(!error);
2685                 ASSERT(ap->alen);
2686         }
2687         nullfb = ap->firstblock == NULLFSBLOCK;
2688         fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
2689         if (nullfb) {
2690                 if (ap->userdata && xfs_inode_is_filestream(ap->ip)) {
2691                         ag = xfs_filestream_lookup_ag(ap->ip);
2692                         ag = (ag != NULLAGNUMBER) ? ag : 0;
2693                         ap->rval = XFS_AGB_TO_FSB(mp, ag, 0);
2694                 } else {
2695                         ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
2696                 }
2697         } else
2698                 ap->rval = ap->firstblock;
2699
2700         xfs_bmap_adjacent(ap);
2701
2702         /*
2703          * If allowed, use ap->rval; otherwise must use firstblock since
2704          * it's in the right allocation group.
2705          */
2706         if (nullfb || XFS_FSB_TO_AGNO(mp, ap->rval) == fb_agno)
2707                 ;
2708         else
2709                 ap->rval = ap->firstblock;
2710         /*
2711          * Normal allocation, done through xfs_alloc_vextent.
2712          */
2713         tryagain = isaligned = 0;
2714         args.tp = ap->tp;
2715         args.mp = mp;
2716         args.fsbno = ap->rval;
2717         args.maxlen = MIN(ap->alen, mp->m_sb.sb_agblocks);
2718         args.firstblock = ap->firstblock;
2719         blen = 0;
2720         if (nullfb) {
2721                 if (ap->userdata && xfs_inode_is_filestream(ap->ip))
2722                         args.type = XFS_ALLOCTYPE_NEAR_BNO;
2723                 else
2724                         args.type = XFS_ALLOCTYPE_START_BNO;
2725                 args.total = ap->total;
2726
2727                 /*
2728                  * Search for an allocation group with a single extent
2729                  * large enough for the request.
2730                  *
2731                  * If one isn't found, then adjust the minimum allocation
2732                  * size to the largest space found.
2733                  */
2734                 startag = ag = XFS_FSB_TO_AGNO(mp, args.fsbno);
2735                 if (startag == NULLAGNUMBER)
2736                         startag = ag = 0;
2737                 notinit = 0;
2738                 down_read(&mp->m_peraglock);
2739                 while (blen < ap->alen) {
2740                         pag = &mp->m_perag[ag];
2741                         if (!pag->pagf_init &&
2742                             (error = xfs_alloc_pagf_init(mp, args.tp,
2743                                     ag, XFS_ALLOC_FLAG_TRYLOCK))) {
2744                                 up_read(&mp->m_peraglock);
2745                                 return error;
2746                         }
2747                         /*
2748                          * See xfs_alloc_fix_freelist...
2749                          */
2750                         if (pag->pagf_init) {
2751                                 xfs_extlen_t    longest;
2752                                 longest = xfs_alloc_longest_free_extent(mp, pag);
2753                                 if (blen < longest)
2754                                         blen = longest;
2755                         } else
2756                                 notinit = 1;
2757
2758                         if (xfs_inode_is_filestream(ap->ip)) {
2759                                 if (blen >= ap->alen)
2760                                         break;
2761
2762                                 if (ap->userdata) {
2763                                         /*
2764                                          * If startag is an invalid AG, we've
2765                                          * come here once before and
2766                                          * xfs_filestream_new_ag picked the
2767                                          * best currently available.
2768                                          *
2769                                          * Don't continue looping, since we
2770                                          * could loop forever.
2771                                          */
2772                                         if (startag == NULLAGNUMBER)
2773                                                 break;
2774
2775                                         error = xfs_filestream_new_ag(ap, &ag);
2776                                         if (error) {
2777                                                 up_read(&mp->m_peraglock);
2778                                                 return error;
2779                                         }
2780
2781                                         /* loop again to set 'blen' */
2782                                         startag = NULLAGNUMBER;
2783                                         continue;
2784                                 }
2785                         }
2786                         if (++ag == mp->m_sb.sb_agcount)
2787                                 ag = 0;
2788                         if (ag == startag)
2789                                 break;
2790                 }
2791                 up_read(&mp->m_peraglock);
2792                 /*
2793                  * Since the above loop used a trylock on each AGF, AGs it
2794                  * could not examine may still have space for this request.
2795                  */
2796                 if (notinit || blen < ap->minlen)
2797                         args.minlen = ap->minlen;
2798                 /*
2799                  * If the best seen length is less than the request
2800                  * length, use the best as the minimum.
2801                  */
2802                 else if (blen < ap->alen)
2803                         args.minlen = blen;
2804                 /*
2805                  * Otherwise we've seen an extent as big as alen,
2806                  * use that as the minimum.
2807                  */
2808                 else
2809                         args.minlen = ap->alen;
2810
2811                 /*
2812                  * Set the failure fallback case to look in the selected
2813                  * AG, as the stream may have moved.
2814                  */
2815                 if (xfs_inode_is_filestream(ap->ip))
2816                         ap->rval = args.fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
2817         } else if (ap->low) {
2818                 if (xfs_inode_is_filestream(ap->ip))
2819                         args.type = XFS_ALLOCTYPE_FIRST_AG;
2820                 else
2821                         args.type = XFS_ALLOCTYPE_START_BNO;
2822                 args.total = args.minlen = ap->minlen;
2823         } else {
2824                 args.type = XFS_ALLOCTYPE_NEAR_BNO;
2825                 args.total = ap->total;
2826                 args.minlen = ap->minlen;
2827         }
2828         /* apply extent size hints if obtained earlier */
2829         if (unlikely(align)) {
2830                 args.prod = align;
2831                 if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod)))
2832                         args.mod = (xfs_extlen_t)(args.prod - args.mod);
2833         } else if (mp->m_sb.sb_blocksize >= PAGE_CACHE_SIZE) {
2834                 args.prod = 1;
2835                 args.mod = 0;
2836         } else {
2837                 args.prod = PAGE_CACHE_SIZE >> mp->m_sb.sb_blocklog;
2838                 if ((args.mod = (xfs_extlen_t)(do_mod(ap->off, args.prod))))
2839                         args.mod = (xfs_extlen_t)(args.prod - args.mod);
2840         }
2841         /*
2842          * If we are not low on available data blocks, and the
2843          * underlying logical volume manager is a stripe, and
2844          * the file offset is zero then try to allocate data
2845          * blocks on stripe unit boundary.
2846          * NOTE: ap->aeof is only set if the allocation length
2847          * is >= the stripe unit and the allocation offset is
2848          * at the end of file.
2849          */
2850         if (!ap->low && ap->aeof) {
2851                 if (!ap->off) {
2852                         args.alignment = mp->m_dalign;
2853                         atype = args.type;
2854                         isaligned = 1;
2855                         /*
2856                          * Adjust for alignment
2857                          */
2858                         if (blen > args.alignment && blen <= ap->alen)
2859                                 args.minlen = blen - args.alignment;
2860                         args.minalignslop = 0;
2861                 } else {
2862                         /*
2863                          * First try an exact bno allocation.
2864                          * If it fails then do a near or start bno
2865                          * allocation with alignment turned on.
2866                          */
2867                         atype = args.type;
2868                         tryagain = 1;
2869                         args.type = XFS_ALLOCTYPE_THIS_BNO;
2870                         args.alignment = 1;
2871                         /*
2872                          * Compute the minlen+alignment for the
2873                          * next case.  Set slop so that the value
2874                          * of minlen+alignment+slop doesn't go up
2875                          * between the calls.
2876                          */
2877                         if (blen > mp->m_dalign && blen <= ap->alen)
2878                                 nextminlen = blen - mp->m_dalign;
2879                         else
2880                                 nextminlen = args.minlen;
2881                         if (nextminlen + mp->m_dalign > args.minlen + 1)
2882                                 args.minalignslop =
2883                                         nextminlen + mp->m_dalign -
2884                                         args.minlen - 1;
2885                         else
2886                                 args.minalignslop = 0;
2887                 }
2888         } else {
2889                 args.alignment = 1;
2890                 args.minalignslop = 0;
2891         }
2892         args.minleft = ap->minleft;
2893         args.wasdel = ap->wasdel;
2894         args.isfl = 0;
2895         args.userdata = ap->userdata;
2896         if ((error = xfs_alloc_vextent(&args)))
2897                 return error;
2898         if (tryagain && args.fsbno == NULLFSBLOCK) {
2899                 /*
2900                  * Exact allocation failed. Now try with alignment
2901                  * turned on.
2902                  */
2903                 args.type = atype;
2904                 args.fsbno = ap->rval;
2905                 args.alignment = mp->m_dalign;
2906                 args.minlen = nextminlen;
2907                 args.minalignslop = 0;
2908                 isaligned = 1;
2909                 if ((error = xfs_alloc_vextent(&args)))
2910                         return error;
2911         }
2912         if (isaligned && args.fsbno == NULLFSBLOCK) {
2913                 /*
2914                  * Allocation failed, so turn off alignment and
2915                  * try again.
2916                  */
2917                 args.type = atype;
2918                 args.fsbno = ap->rval;
2919                 args.alignment = 0;
2920                 if ((error = xfs_alloc_vextent(&args)))
2921                         return error;
2922         }
2923         if (args.fsbno == NULLFSBLOCK && nullfb &&
2924             args.minlen > ap->minlen) {
2925                 args.minlen = ap->minlen;
2926                 args.type = XFS_ALLOCTYPE_START_BNO;
2927                 args.fsbno = ap->rval;
2928                 if ((error = xfs_alloc_vextent(&args)))
2929                         return error;
2930         }
2931         if (args.fsbno == NULLFSBLOCK && nullfb) {
2932                 args.fsbno = 0;
2933                 args.type = XFS_ALLOCTYPE_FIRST_AG;
2934                 args.total = ap->minlen;
2935                 args.minleft = 0;
2936                 if ((error = xfs_alloc_vextent(&args)))
2937                         return error;
2938                 ap->low = 1;
2939         }
2940         if (args.fsbno != NULLFSBLOCK) {
2941                 ap->firstblock = ap->rval = args.fsbno;
2942                 ASSERT(nullfb || fb_agno == args.agno ||
2943                        (ap->low && fb_agno < args.agno));
2944                 ap->alen = args.len;
2945                 ap->ip->i_d.di_nblocks += args.len;
2946                 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
2947                 if (ap->wasdel)
2948                         ap->ip->i_delayed_blks -= args.len;
2949                 /*
2950                  * Adjust the disk quota also. This was reserved
2951                  * earlier.
2952                  */
2953                 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
2954                         ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
2955                                         XFS_TRANS_DQ_BCOUNT,
2956                         (long) args.len);
2957         } else {
2958                 ap->rval = NULLFSBLOCK;
2959                 ap->alen = 0;
2960         }
2961         return 0;
2962 }
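
/*
 * Summary of the retry ladder in xfs_bmap_btalloc() above; each later step
 * runs only if the preceding xfs_alloc_vextent() call returned NULLFSBLOCK:
 *
 *  1. the primary attempt (an exact-bno attempt in the aligned-EOF,
 *     non-zero offset case, otherwise a near/start bno attempt);
 *  2. if the exact-bno attempt failed, retry near the same block with
 *     stripe alignment turned on (minlen = nextminlen);
 *  3. if an aligned attempt failed, retry with alignment turned off;
 *  4. if nothing has been allocated in this transaction yet (nullfb) and
 *     minlen had been raised above ap->minlen, retry with ap->minlen;
 *  5. finally fall back to "first AG onwards" with minleft 0 and flag the
 *     allocation as low on space (ap->low = 1).
 */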
2963
2964 /*
2965  * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
2966  * It figures out where to ask the underlying allocator to put the new extent.
2967  */
2968 STATIC int
2969 xfs_bmap_alloc(
2970         xfs_bmalloca_t  *ap)            /* bmap alloc argument struct */
2971 {
2972         if (XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata)
2973                 return xfs_bmap_rtalloc(ap);
2974         return xfs_bmap_btalloc(ap);
2975 }
2976
2977 /*
2978  * Transform a btree format file with only one leaf node, where the
2979  * extents list will fit in the inode, into an extents format file.
2980  * Since the file extents are already in-core, all we have to do is
2981  * give up the space for the btree root and pitch the leaf block.
2982  */
2983 STATIC int                              /* error */
2984 xfs_bmap_btree_to_extents(
2985         xfs_trans_t             *tp,    /* transaction pointer */
2986         xfs_inode_t             *ip,    /* incore inode pointer */
2987         xfs_btree_cur_t         *cur,   /* btree cursor */
2988         int                     *logflagsp, /* inode logging flags */
2989         int                     whichfork)  /* data or attr fork */
2990 {
2991         /* REFERENCED */
2992         struct xfs_btree_block  *cblock;/* child btree block */
2993         xfs_fsblock_t           cbno;   /* child block number */
2994         xfs_buf_t               *cbp;   /* child block's buffer */
2995         int                     error;  /* error return value */
2996         xfs_ifork_t             *ifp;   /* inode fork data */
2997         xfs_mount_t             *mp;    /* mount point structure */
2998         __be64                  *pp;    /* ptr to block address */
2999         struct xfs_btree_block  *rblock;/* root btree block */
3000
3001         mp = ip->i_mount;
3002         ifp = XFS_IFORK_PTR(ip, whichfork);
3003         ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3004         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
3005         rblock = ifp->if_broot;
3006         ASSERT(be16_to_cpu(rblock->bb_level) == 1);
3007         ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
3008         ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
3009         pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
3010         cbno = be64_to_cpu(*pp);
3011         *logflagsp = 0;
3012 #ifdef DEBUG
3013         if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
3014                 return error;
3015 #endif
3016         if ((error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp,
3017                         XFS_BMAP_BTREE_REF)))
3018                 return error;
3019         cblock = XFS_BUF_TO_BLOCK(cbp);
3020         if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
3021                 return error;
3022         xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp);
3023         ip->i_d.di_nblocks--;
3024         xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
3025         xfs_trans_binval(tp, cbp);
3026         if (cur->bc_bufs[0] == cbp)
3027                 cur->bc_bufs[0] = NULL;
3028         xfs_iroot_realloc(ip, -1, whichfork);
3029         ASSERT(ifp->if_broot == NULL);
3030         ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
3031         XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
3032         *logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
3033         return 0;
3034 }
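
/*
 * Bookkeeping note for xfs_bmap_btree_to_extents() above: the single leaf
 * block is not freed immediately but queued on the flist for freeing at
 * transaction commit, di_nblocks and the block quota are decremented by
 * one, and the buffer is invalidated so the now-stale leaf is never
 * written back.
 */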
3035
3036 /*
3037  * Called by xfs_bmapi to update file extent records and the btree
3038  * after removing space (or undoing a delayed allocation).
3039  */
3040 STATIC int                              /* error */
3041 xfs_bmap_del_extent(
3042         xfs_inode_t             *ip,    /* incore inode pointer */
3043         xfs_trans_t             *tp,    /* current transaction pointer */
3044         xfs_extnum_t            idx,    /* extent number to update/delete */
3045         xfs_bmap_free_t         *flist, /* list of extents to be freed */
3046         xfs_btree_cur_t         *cur,   /* if null, not a btree */
3047         xfs_bmbt_irec_t         *del,   /* data to remove from extents */
3048         int                     *logflagsp, /* inode logging flags */
3049         xfs_extdelta_t          *delta, /* Change made to incore extents */
3050         int                     whichfork, /* data or attr fork */
3051         int                     rsvd)   /* OK to allocate reserved blocks */
3052 {
3053         xfs_filblks_t           da_new; /* new delay-alloc indirect blocks */
3054         xfs_filblks_t           da_old; /* old delay-alloc indirect blocks */
3055         xfs_fsblock_t           del_endblock=0; /* first block past del */
3056         xfs_fileoff_t           del_endoff;     /* first offset past del */
3057         int                     delay;  /* current block is delayed allocated */
3058         int                     do_fx;  /* free extent at end of routine */
3059         xfs_bmbt_rec_host_t     *ep;    /* current extent entry pointer */
3060         int                     error;  /* error return value */
3061         int                     flags;  /* inode logging flags */
3062         xfs_bmbt_irec_t         got;    /* current extent entry */
3063         xfs_fileoff_t           got_endoff;     /* first offset past got */
3064         int                     i;      /* temp state */
3065         xfs_ifork_t             *ifp;   /* inode fork pointer */
3066         xfs_mount_t             *mp;    /* mount structure */
3067         xfs_filblks_t           nblks;  /* quota/sb block count */
3068         xfs_bmbt_irec_t         new;    /* new record to be inserted */
3069         /* REFERENCED */
3070         uint                    qfield; /* quota field to update */
3071         xfs_filblks_t           temp;   /* for indirect length calculations */
3072         xfs_filblks_t           temp2;  /* for indirect length calculations */
3073
3074         XFS_STATS_INC(xs_del_exlist);
3075         mp = ip->i_mount;
3076         ifp = XFS_IFORK_PTR(ip, whichfork);
3077         ASSERT((idx >= 0) && (idx < ifp->if_bytes /
3078                 (uint)sizeof(xfs_bmbt_rec_t)));
3079         ASSERT(del->br_blockcount > 0);
3080         ep = xfs_iext_get_ext(ifp, idx);
3081         xfs_bmbt_get_all(ep, &got);
3082         ASSERT(got.br_startoff <= del->br_startoff);
3083         del_endoff = del->br_startoff + del->br_blockcount;
3084         got_endoff = got.br_startoff + got.br_blockcount;
3085         ASSERT(got_endoff >= del_endoff);
3086         delay = isnullstartblock(got.br_startblock);
3087         ASSERT(isnullstartblock(del->br_startblock) == delay);
3088         flags = 0;
3089         qfield = 0;
3090         error = 0;
3091         /*
3092          * If deleting a real allocation, must free up the disk space.
3093          */
3094         if (!delay) {
3095                 flags = XFS_ILOG_CORE;
3096                 /*
3097                  * Realtime allocation.  Free it and record di_nblocks update.
3098                  */
3099                 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
3100                         xfs_fsblock_t   bno;
3101                         xfs_filblks_t   len;
3102
3103                         ASSERT(do_mod(del->br_blockcount,
3104                                       mp->m_sb.sb_rextsize) == 0);
3105                         ASSERT(do_mod(del->br_startblock,
3106                                       mp->m_sb.sb_rextsize) == 0);
3107                         bno = del->br_startblock;
3108                         len = del->br_blockcount;
3109                         do_div(bno, mp->m_sb.sb_rextsize);
3110                         do_div(len, mp->m_sb.sb_rextsize);
3111                         if ((error = xfs_rtfree_extent(ip->i_transp, bno,
3112                                         (xfs_extlen_t)len)))
3113                                 goto done;
3114                         do_fx = 0;
3115                         nblks = len * mp->m_sb.sb_rextsize;
3116                         qfield = XFS_TRANS_DQ_RTBCOUNT;
3117                 }
3118                 /*
3119                  * Ordinary allocation.
3120                  */
3121                 else {
3122                         do_fx = 1;
3123                         nblks = del->br_blockcount;
3124                         qfield = XFS_TRANS_DQ_BCOUNT;
3125                 }
3126                 /*
3127                  * Set up del_endblock and cur for later.
3128                  */
3129                 del_endblock = del->br_startblock + del->br_blockcount;
3130                 if (cur) {
3131                         if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
3132                                         got.br_startblock, got.br_blockcount,
3133                                         &i)))
3134                                 goto done;
3135                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
3136                 }
3137                 da_old = da_new = 0;
3138         } else {
3139                 da_old = startblockval(got.br_startblock);
3140                 da_new = 0;
3141                 nblks = 0;
3142                 do_fx = 0;
3143         }
3144         /*
3145          * Set flag value to use in switch statement.  Bit 1 is set if
3146          * del starts at got's start offset, bit 0 if del ends at got's end.
3147          */
3148         switch (((got.br_startoff == del->br_startoff) << 1) |
3149                 (got_endoff == del_endoff)) {
3150         case 3:
3151                 /*
3152                  * Matches the whole extent.  Delete the entry.
3153                  */
3154                 XFS_BMAP_TRACE_DELETE("3", ip, idx, 1, whichfork);
3155                 xfs_iext_remove(ip, idx, 1,
3156                                 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
3157                 ifp->if_lastex = idx;
3158                 if (delay)
3159                         break;
3160                 XFS_IFORK_NEXT_SET(ip, whichfork,
3161                         XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
3162                 flags |= XFS_ILOG_CORE;
3163                 if (!cur) {
3164                         flags |= xfs_ilog_fext(whichfork);
3165                         break;
3166                 }
3167                 if ((error = xfs_btree_delete(cur, &i)))
3168                         goto done;
3169                 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
3170                 break;
3171
3172         case 2:
3173                 /*
3174                  * Deleting the first part of the extent.
3175                  */
3176                 XFS_BMAP_TRACE_PRE_UPDATE("2", ip, idx, whichfork);
3177                 xfs_bmbt_set_startoff(ep, del_endoff);
3178                 temp = got.br_blockcount - del->br_blockcount;
3179                 xfs_bmbt_set_blockcount(ep, temp);
3180                 ifp->if_lastex = idx;
3181                 if (delay) {
3182                         temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
3183                                 da_old);
3184                         xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
3185                         XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx,
3186                                 whichfork);
3187                         da_new = temp;
3188                         break;
3189                 }
3190                 xfs_bmbt_set_startblock(ep, del_endblock);
3191                 XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx, whichfork);
3192                 if (!cur) {
3193                         flags |= xfs_ilog_fext(whichfork);
3194                         break;
3195                 }
3196                 if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
3197                                 got.br_blockcount - del->br_blockcount,
3198                                 got.br_state)))
3199                         goto done;
3200                 break;
3201
3202         case 1:
3203                 /*
3204                  * Deleting the last part of the extent.
3205                  */
3206                 temp = got.br_blockcount - del->br_blockcount;
3207                 XFS_BMAP_TRACE_PRE_UPDATE("1", ip, idx, whichfork);
3208                 xfs_bmbt_set_blockcount(ep, temp);
3209                 ifp->if_lastex = idx;
3210                 if (delay) {
3211                         temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
3212                                 da_old);
3213                         xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
3214                         XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx,
3215                                 whichfork);
3216                         da_new = temp;
3217                         break;
3218                 }
3219                 XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx, whichfork);
3220                 if (!cur) {
3221                         flags |= xfs_ilog_fext(whichfork);
3222                         break;
3223                 }
3224                 if ((error = xfs_bmbt_update(cur, got.br_startoff,
3225                                 got.br_startblock,
3226                                 got.br_blockcount - del->br_blockcount,
3227                                 got.br_state)))
3228                         goto done;
3229                 break;
3230
3231         case 0:
3232                 /*
3233                  * Deleting the middle of the extent.
3234                  */
3235                 temp = del->br_startoff - got.br_startoff;
3236                 XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, whichfork);
3237                 xfs_bmbt_set_blockcount(ep, temp);
3238                 new.br_startoff = del_endoff;
3239                 temp2 = got_endoff - del_endoff;
3240                 new.br_blockcount = temp2;
3241                 new.br_state = got.br_state;
3242                 if (!delay) {
3243                         new.br_startblock = del_endblock;
3244                         flags |= XFS_ILOG_CORE;
3245                         if (cur) {
3246                                 if ((error = xfs_bmbt_update(cur,
3247                                                 got.br_startoff,
3248                                                 got.br_startblock, temp,
3249                                                 got.br_state)))
3250                                         goto done;
3251                                 if ((error = xfs_btree_increment(cur, 0, &i)))
3252                                         goto done;
3253                                 cur->bc_rec.b = new;
3254                                 error = xfs_btree_insert(cur, &i);
3255                                 if (error && error != ENOSPC)
3256                                         goto done;
3257                                 /*
3258                                  * If we get no-space back from the btree insert,
3259                                  * it tried a split, and we have a zero
3260                                  * block reservation.
3261                                  * Fix up our state and return the error.
3262                                  */
3263                                 if (error == ENOSPC) {
3264                                         /*
3265                                          * Reset the cursor, don't trust
3266                                          * it after any insert operation.
3267                                          */
3268                                         if ((error = xfs_bmbt_lookup_eq(cur,
3269                                                         got.br_startoff,
3270                                                         got.br_startblock,
3271                                                         temp, &i)))
3272                                                 goto done;
3273                                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
3274                                         /*
3275                                          * Update the btree record back
3276                                          * to the original value.
3277                                          */
3278                                         if ((error = xfs_bmbt_update(cur,
3279                                                         got.br_startoff,
3280                                                         got.br_startblock,
3281                                                         got.br_blockcount,
3282                                                         got.br_state)))
3283                                                 goto done;
3284                                         /*
3285                                          * Reset the extent record back
3286                                          * to the original value.
3287                                          */
3288                                         xfs_bmbt_set_blockcount(ep,
3289                                                 got.br_blockcount);
3290                                         flags = 0;
3291                                         error = XFS_ERROR(ENOSPC);
3292                                         goto done;
3293                                 }
3294                                 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
3295                         } else
3296                                 flags |= xfs_ilog_fext(whichfork);
3297                         XFS_IFORK_NEXT_SET(ip, whichfork,
3298                                 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
3299                 } else {
3300                         ASSERT(whichfork == XFS_DATA_FORK);
3301                         temp = xfs_bmap_worst_indlen(ip, temp);
3302                         xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
3303                         temp2 = xfs_bmap_worst_indlen(ip, temp2);
3304                         new.br_startblock = nullstartblock((int)temp2);
3305                         da_new = temp + temp2;
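                        /*
                         * Splitting one delalloc extent into two can make the
                         * combined worst-case indirect reservation (da_new)
                         * exceed what the original extent had reserved
                         * (da_old).  Trim the two halves alternately until
                         * the total fits; this path only ever gives back
                         * excess reservation (see the da_old >= da_new
                         * assert below), it never takes more.
                         */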
3306                         while (da_new > da_old) {
3307                                 if (temp) {
3308                                         temp--;
3309                                         da_new--;
3310                                         xfs_bmbt_set_startblock(ep,
3311                                                 nullstartblock((int)temp));
3312                                 }
3313                                 if (da_new == da_old)
3314                                         break;
3315                                 if (temp2) {
3316                                         temp2--;
3317                                         da_new--;
3318                                         new.br_startblock =
3319                                                 nullstartblock((int)temp2);
3320                                 }
3321                         }
3322                 }
3323                 XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, whichfork);
3324                 XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 1, &new, NULL,
3325                         whichfork);
3326                 xfs_iext_insert(ip, idx + 1, 1, &new,
3327                                 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
3328                 ifp->if_lastex = idx + 1;
3329                 break;
3330         }
3331         /*
3332          * If we need to, add to list of extents to delete.
3333          */
3334         if (do_fx)
3335                 xfs_bmap_add_free(del->br_startblock, del->br_blockcount, flist,
3336                         mp);
3337         /*
3338          * Adjust inode # blocks in the file.
3339          */
3340         if (nblks)
3341                 ip->i_d.di_nblocks -= nblks;
3342         /*
3343          * Adjust quota data.
3344          */
3345         if (qfield)
3346                 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
3347
3348         /*
3349          * Account for change in delayed indirect blocks.
3350          * Nothing to do for disk quota accounting here.
3351          */
3352         ASSERT(da_old >= da_new);
3353         if (da_old > da_new)
3354                 xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int64_t)(da_old - da_new),
3355                         rsvd);
3356         if (delta) {
3357                 /* DELTA: report the original extent. */
3358                 if (delta->xed_startoff > got.br_startoff)
3359                         delta->xed_startoff = got.br_startoff;
3360                 if (delta->xed_blockcount < got.br_startoff+got.br_blockcount)
3361                         delta->xed_blockcount = got.br_startoff +
3362                                                         got.br_blockcount;
3363         }
3364 done:
3365         *logflagsp = flags;
3366         return error;
3367 }
3368
3369 /*
3370  * Remove the entry "free" from the free item list.  Prev points to the
3371  * previous entry, unless "free" is the head of the list (then prev is NULL).
3372  */
3373 STATIC void
3374 xfs_bmap_del_free(
3375         xfs_bmap_free_t         *flist, /* free item list header */
3376         xfs_bmap_free_item_t    *prev,  /* previous item on list, if any */
3377         xfs_bmap_free_item_t    *free)  /* list item to be freed */
3378 {
3379         if (prev)
3380                 prev->xbfi_next = free->xbfi_next;
3381         else
3382                 flist->xbf_first = free->xbfi_next;
3383         flist->xbf_count--;
3384         kmem_zone_free(xfs_bmap_free_item_zone, free);
3385 }
3386
3387 /*
3388  * Convert an extents-format file into a btree-format file.
3389  * The new file will have a root block (in the inode) and a single child block.
3390  */
3391 STATIC int                                      /* error */
3392 xfs_bmap_extents_to_btree(
3393         xfs_trans_t             *tp,            /* transaction pointer */
3394         xfs_inode_t             *ip,            /* incore inode pointer */
3395         xfs_fsblock_t           *firstblock,    /* first-block-allocated */
3396         xfs_bmap_free_t         *flist,         /* blocks freed in xaction */
3397         xfs_btree_cur_t         **curp,         /* cursor returned to caller */
3398         int                     wasdel,         /* converting a delayed alloc */
3399         int                     *logflagsp,     /* inode logging flags */
3400         int                     whichfork)      /* data or attr fork */
3401 {
3402         struct xfs_btree_block  *ablock;        /* allocated (child) bt block */
3403         xfs_buf_t               *abp;           /* buffer for ablock */
3404         xfs_alloc_arg_t         args;           /* allocation arguments */
3405         xfs_bmbt_rec_t          *arp;           /* child record pointer */
3406         struct xfs_btree_block  *block;         /* btree root block */
3407         xfs_btree_cur_t         *cur;           /* bmap btree cursor */
3408         xfs_bmbt_rec_host_t     *ep;            /* extent record pointer */
3409         int                     error;          /* error return value */
3410         xfs_extnum_t            i, cnt;         /* extent record index */
3411         xfs_ifork_t             *ifp;           /* inode fork pointer */
3412         xfs_bmbt_key_t          *kp;            /* root block key pointer */
3413         xfs_mount_t             *mp;            /* mount structure */
3414         xfs_extnum_t            nextents;       /* number of file extents */
3415         xfs_bmbt_ptr_t          *pp;            /* root block address pointer */
3416
3417         ifp = XFS_IFORK_PTR(ip, whichfork);
3418         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
3419         ASSERT(ifp->if_ext_max ==
3420                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
3421         /*
3422          * Make space in the inode incore.
3423          */
3424         xfs_iroot_realloc(ip, 1, whichfork);
3425         ifp->if_flags |= XFS_IFBROOT;
3426
3427         /*
3428          * Fill in the root.
3429          */
3430         block = ifp->if_broot;
3431         block->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
3432         block->bb_level = cpu_to_be16(1);
3433         block->bb_numrecs = cpu_to_be16(1);
3434         block->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO);
3435         block->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO);
3436
3437         /*
3438          * Need a cursor.  Can't allocate until bb_level is filled in.
3439          */
3440         mp = ip->i_mount;
3441         cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
3442         cur->bc_private.b.firstblock = *firstblock;
3443         cur->bc_private.b.flist = flist;
3444         cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
3445         /*
3446          * Convert to a btree with two levels, one record in root.
3447          */
3448         XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
3449         args.tp = tp;
3450         args.mp = mp;
3451         args.firstblock = *firstblock;
3452         if (*firstblock == NULLFSBLOCK) {
3453                 args.type = XFS_ALLOCTYPE_START_BNO;
3454                 args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
3455         } else if (flist->xbf_low) {
3456                 args.type = XFS_ALLOCTYPE_START_BNO;
3457                 args.fsbno = *firstblock;
3458         } else {
3459                 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3460                 args.fsbno = *firstblock;
3461         }
3462         args.minlen = args.maxlen = args.prod = 1;
3463         args.total = args.minleft = args.alignment = args.mod = args.isfl =
3464                 args.minalignslop = 0;
3465         args.wasdel = wasdel;
3466         *logflagsp = 0;
3467         if ((error = xfs_alloc_vextent(&args))) {
3468                 xfs_iroot_realloc(ip, -1, whichfork);
3469                 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
3470                 return error;
3471         }
3472         /*
3473          * Allocation can't fail, the space was reserved.
3474          */
3475         ASSERT(args.fsbno != NULLFSBLOCK);
3476         ASSERT(*firstblock == NULLFSBLOCK ||
3477                args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) ||
3478                (flist->xbf_low &&
3479                 args.agno > XFS_FSB_TO_AGNO(mp, *firstblock)));
3480         *firstblock = cur->bc_private.b.firstblock = args.fsbno;
3481         cur->bc_private.b.allocated++;
3482         ip->i_d.di_nblocks++;
3483         xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
3484         abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
3485         /*
3486          * Fill in the child block.
3487          */
3488         ablock = XFS_BUF_TO_BLOCK(abp);
3489         ablock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
3490         ablock->bb_level = 0;
3491         ablock->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO);
3492         ablock->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO);
3493         arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
3494         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3495         for (cnt = i = 0; i < nextents; i++) {
3496                 ep = xfs_iext_get_ext(ifp, i);
3497                 if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
3498                         arp->l0 = cpu_to_be64(ep->l0);
3499                         arp->l1 = cpu_to_be64(ep->l1);
3500                         arp++; cnt++;
3501                 }
3502         }
3503         ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
3504         xfs_btree_set_numrecs(ablock, cnt);
3505
3506         /*
3507          * Fill in the root key and pointer.
3508          */
3509         kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
3510         arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
3511         kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
3512         pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
3513                                                 be16_to_cpu(block->bb_level)));
3514         *pp = cpu_to_be64(args.fsbno);
3515
3516         /*
3517          * Do all this logging at the end so that
3518          * the root is at the right level.
3519          */
3520         xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
3521         xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
3522         ASSERT(*curp == NULL);
3523         *curp = cur;
3524         *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
3525         return 0;
3526 }
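
/*
 * Illustrative, standalone sketch of the record-copy step above: walk the
 * incore extent records and copy only the "real" ones into the new leaf,
 * skipping delayed-allocation placeholders whose start block is still a
 * null/indlen encoding.  The structures and the delalloc flag are simplified
 * stand-ins for the XFS record format, and the byte swapping done by the
 * real code is omitted.
 */
struct demo_incore_rec {
        unsigned long long      l0;     /* packed startoff/startblock bits */
        unsigned long long      l1;     /* packed startblock/blockcount bits */
        int                     delalloc;       /* stand-in for isnullstartblock() */
};

struct demo_leaf_rec {
        unsigned long long      l0;     /* big-endian on disk; swap omitted */
        unsigned long long      l1;
};

static int                              /* count of records copied */
demo_copy_real_extents(
        struct demo_incore_rec  *in,    /* incore extent records */
        int                     nextents,
        struct demo_leaf_rec    *out)   /* destination leaf records */
{
        int                     i;
        int                     cnt;

        for (cnt = i = 0; i < nextents; i++) {
                if (in[i].delalloc)
                        continue;       /* delayed alloc: nothing on disk yet */
                out[cnt].l0 = in[i].l0; /* real code: cpu_to_be64() */
                out[cnt].l1 = in[i].l1;
                cnt++;
        }
        return cnt;
}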
3527
3528 /*
3529  * Calculate the default attribute fork offset for newly created inodes.
3530  */
3531 uint
3532 xfs_default_attroffset(
3533         struct xfs_inode        *ip)
3534 {
3535         struct xfs_mount        *mp = ip->i_mount;
3536         uint                    offset;
3537
3538         if (mp->m_sb.sb_inodesize == 256) {
3539                 offset = XFS_LITINO(mp) -
3540                                 XFS_BMDR_SPACE_CALC(MINABTPTRS);
3541         } else {
3542                 offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
3543         }
3544
3545         ASSERT(offset < XFS_LITINO(mp));
3546         return offset;
3547 }
3548
3549 /*
3550  * Helper routine to reset inode di_forkoff field when switching
3551  * attribute fork from local to extent format - we reset it where
3552  * possible to make space available for inline data fork extents.
3553  */
3554 STATIC void
3555 xfs_bmap_forkoff_reset(
3556         xfs_mount_t     *mp,
3557         xfs_inode_t     *ip,
3558         int             whichfork)
3559 {
3560         if (whichfork == XFS_ATTR_FORK &&
3561             ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
3562             ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
3563             ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
3564                 uint    dfl_forkoff = xfs_default_attroffset(ip) >> 3;
3565
3566                 if (dfl_forkoff > ip->i_d.di_forkoff) {
3567                         ip->i_d.di_forkoff = dfl_forkoff;
3568                         ip->i_df.if_ext_max =
3569                                 XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t);
3570                         ip->i_afp->if_ext_max =
3571                                 XFS_IFORK_ASIZE(ip) / sizeof(xfs_bmbt_rec_t);
3572                 }
3573         }
3574 }
3575
3576 /*
3577  * Convert a local file to an extents file.
3578  * This code must not be used for data forks of regular files,
3579  * since the file data would need to be logged to stay consistent.
3580  * (The bmap-level manipulations are ok, though).
3581  */
3582 STATIC int                              /* error */
3583 xfs_bmap_local_to_extents(
3584         xfs_trans_t     *tp,            /* transaction pointer */
3585         xfs_inode_t     *ip,            /* incore inode pointer */
3586         xfs_fsblock_t   *firstblock,    /* first block allocated in xaction */
3587         xfs_extlen_t    total,          /* total blocks needed by transaction */
3588         int             *logflagsp,     /* inode logging flags */
3589         int             whichfork)      /* data or attr fork */
3590 {
3591         int             error;          /* error return value */
3592         int             flags;          /* logging flags returned */
3593         xfs_ifork_t     *ifp;           /* inode fork pointer */
3594
3595         /*
3596          * We don't want to deal with the case of keeping inode data inline yet.
3597          * So passing in the data fork of a regular inode is invalid.
3598          */
3599         ASSERT(!((ip->i_d.di_mode & S_IFMT) == S_IFREG &&
3600                  whichfork == XFS_DATA_FORK));
3601         ifp = XFS_IFORK_PTR(ip, whichfork);
3602         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
3603         flags = 0;
3604         error = 0;
3605         if (ifp->if_bytes) {
3606                 xfs_alloc_arg_t args;   /* allocation arguments */
3607                 xfs_buf_t       *bp;    /* buffer for extent block */
3608                 xfs_bmbt_rec_host_t *ep;/* extent record pointer */
3609
3610                 args.tp = tp;
3611                 args.mp = ip->i_mount;
3612                 args.firstblock = *firstblock;
3613                 ASSERT((ifp->if_flags &
3614                         (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE);
3615                 /*
3616                  * Allocate a block.  We know we need only one, since the
3617                  * file currently fits in an inode.
3618                  */
3619                 if (*firstblock == NULLFSBLOCK) {
3620                         args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
3621                         args.type = XFS_ALLOCTYPE_START_BNO;
3622                 } else {
3623                         args.fsbno = *firstblock;
3624                         args.type = XFS_ALLOCTYPE_NEAR_BNO;
3625                 }
3626                 args.total = total;
3627                 args.mod = args.minleft = args.alignment = args.wasdel =
3628                         args.isfl = args.minalignslop = 0;
3629                 args.minlen = args.maxlen = args.prod = 1;
3630                 if ((error = xfs_alloc_vextent(&args)))
3631                         goto done;
3632                 /*
3633                  * Can't fail, the space was reserved.
3634                  */
3635                 ASSERT(args.fsbno != NULLFSBLOCK);
3636                 ASSERT(args.len == 1);
3637                 *firstblock = args.fsbno;
3638                 bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
3639                 memcpy((char *)XFS_BUF_PTR(bp), ifp->if_u1.if_data,
3640                         ifp->if_bytes);
3641                 xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
3642                 xfs_bmap_forkoff_reset(args.mp, ip, whichfork);
3643                 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
3644                 xfs_iext_add(ifp, 0, 1);
3645                 ep = xfs_iext_get_ext(ifp, 0);
3646                 xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
3647                 XFS_BMAP_TRACE_POST_UPDATE("new", ip, 0, whichfork);
3648                 XFS_IFORK_NEXT_SET(ip, whichfork, 1);
3649                 ip->i_d.di_nblocks = 1;
3650                 xfs_trans_mod_dquot_byino(tp, ip,
3651                         XFS_TRANS_DQ_BCOUNT, 1L);
3652                 flags |= xfs_ilog_fext(whichfork);
3653         } else {
3654                 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
3655                 xfs_bmap_forkoff_reset(ip->i_mount, ip, whichfork);
3656         }
3657         ifp->if_flags &= ~XFS_IFINLINE;
3658         ifp->if_flags |= XFS_IFEXTENTS;
3659         XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
3660         flags |= XFS_ILOG_CORE;
3661 done:
3662         *logflagsp = flags;
3663         return error;
3664 }
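
/*
 * Standalone sketch of the conversion above: the inline ("local") contents
 * of a fork are copied into one freshly allocated block and replaced by a
 * single extent record covering file offset 0 for one block.  Everything
 * here (the block store, the record layout, the single-extent fork) is a
 * hypothetical stand-in used only to show the shape of the operation;
 * memcpy() assumes <string.h>.
 */
#define DEMO_BLOCKSIZE  4096

struct demo_extent {
        unsigned long long      startoff;       /* file offset, in blocks */
        unsigned long long      startblock;     /* fs block number */
        unsigned long long      blockcount;
};

struct demo_fork {
        char                    inline_data[256];
        int                     inline_bytes;   /* 0 once converted */
        struct demo_extent      extents[1];
        int                     nextents;
};

static void
demo_local_to_extents(
        struct demo_fork        *fork,
        char                    *block_store,   /* base of fs block 0 */
        unsigned long long      newblock)       /* block just allocated */
{
        char    *bp = block_store + newblock * DEMO_BLOCKSIZE;

        memcpy(bp, fork->inline_data, fork->inline_bytes);

        fork->extents[0].startoff = 0;
        fork->extents[0].startblock = newblock;
        fork->extents[0].blockcount = 1;
        fork->nextents = 1;
        fork->inline_bytes = 0;         /* the inline copy is no longer used */
}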
3665
3666 /*
3667  * Search the extent records for the entry containing block bno.
3668  * If bno lies in a hole, point to the next entry.  If bno lies
3669  * past eof, *eofp will be set, and *prevp will contain the last
3670  * entry (null if none).  Else, *lastxp will be set to the index
3671  * of the found entry; *gotp will contain the entry.
3672  */
3673 STATIC xfs_bmbt_rec_host_t *            /* pointer to found extent entry */
3674 xfs_bmap_search_multi_extents(
3675         xfs_ifork_t     *ifp,           /* inode fork pointer */
3676         xfs_fileoff_t   bno,            /* block number searched for */
3677         int             *eofp,          /* out: end of file found */
3678         xfs_extnum_t    *lastxp,        /* out: last extent index */
3679         xfs_bmbt_irec_t *gotp,          /* out: extent entry found */
3680         xfs_bmbt_irec_t *prevp)         /* out: previous extent entry found */
3681 {
3682         xfs_bmbt_rec_host_t *ep;                /* extent record pointer */
3683         xfs_extnum_t    lastx;          /* last extent index */
3684
3685         /*
3686          * Initialize the extent entry structure to catch access to
3687          * uninitialized br_startblock field.
3688          */
3689         gotp->br_startoff = 0xffa5a5a5a5a5a5a5LL;
3690         gotp->br_blockcount = 0xa55a5a5a5a5a5a5aLL;
3691         gotp->br_state = XFS_EXT_INVALID;
3692 #if XFS_BIG_BLKNOS
3693         gotp->br_startblock = 0xffffa5a5a5a5a5a5LL;
3694 #else
3695         gotp->br_startblock = 0xffffa5a5;
3696 #endif
3697         prevp->br_startoff = NULLFILEOFF;
3698
3699         ep = xfs_iext_bno_to_ext(ifp, bno, &lastx);
3700         if (lastx > 0) {
3701                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), prevp);
3702         }
3703         if (lastx < (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
3704                 xfs_bmbt_get_all(ep, gotp);
3705                 *eofp = 0;
3706         } else {
3707                 if (lastx > 0) {
3708                         *gotp = *prevp;
3709                 }
3710                 *eofp = 1;
3711                 ep = NULL;
3712         }
3713         *lastxp = lastx;
3714         return ep;
3715 }
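
/*
 * Standalone sketch of the lookup semantics described above: given extent
 * records sorted by file offset, find the record containing "bno"; if bno
 * falls in a hole, hand back the next record; if it is past the last record,
 * report end-of-file and hand back the previous record.  A linear scan is
 * used purely for clarity, and the names and types are illustrative only.
 */
struct demo_irec {
        unsigned long long      startoff;
        unsigned long long      blockcount;
};

static int                              /* index found, nextents when at/past EOF */
demo_search_extents(
        struct demo_irec        *recs,          /* sorted by startoff */
        int                     nextents,
        unsigned long long      bno,            /* file block searched for */
        int                     *eofp,          /* out: bno past the last record */
        struct demo_irec        *gotp,          /* out: found (or next) record */
        struct demo_irec        *prevp)         /* out: previous record, if any */
{
        int                     i;

        prevp->startoff = ~0ULL;        /* "no previous record" marker */
        for (i = 0; i < nextents; i++) {
                if (bno < recs[i].startoff + recs[i].blockcount) {
                        *gotp = recs[i];        /* holds bno, or follows the hole */
                        *eofp = 0;
                        if (i > 0)
                                *prevp = recs[i - 1];
                        return i;
                }
        }
        *eofp = 1;
        if (nextents > 0)
                *gotp = *prevp = recs[nextents - 1];
        return nextents;
}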
3716
3717 /*
3718  * Search the extents list for the inode, for the extent containing bno.
3719  * If bno lies in a hole, point to the next entry.  If bno lies past eof,
3720  * *eofp will be set, and *prevp will contain the last entry (null if none).
3721  * Else, *lastxp will be set to the index of the found
3722  * entry; *gotp will contain the entry.
3723  */
3724 STATIC xfs_bmbt_rec_host_t *                 /* pointer to found extent entry */
3725 xfs_bmap_search_extents(
3726         xfs_inode_t     *ip,            /* incore inode pointer */
3727         xfs_fileoff_t   bno,            /* block number searched for */
3728         int             fork,           /* data or attr fork */
3729         int             *eofp,          /* out: end of file found */
3730         xfs_extnum_t    *lastxp,        /* out: last extent index */
3731         xfs_bmbt_irec_t *gotp,          /* out: extent entry found */
3732         xfs_bmbt_irec_t *prevp)         /* out: previous extent entry found */
3733 {
3734         xfs_ifork_t     *ifp;           /* inode fork pointer */
3735         xfs_bmbt_rec_host_t  *ep;            /* extent record pointer */
3736
3737         XFS_STATS_INC(xs_look_exlist);
3738         ifp = XFS_IFORK_PTR(ip, fork);
3739
3740         ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp);
3741
3742         if (unlikely(!(gotp->br_startblock) && (*lastxp != NULLEXTNUM) &&
3743                      !(XFS_IS_REALTIME_INODE(ip) && fork == XFS_DATA_FORK))) {
3744                 xfs_cmn_err(XFS_PTAG_FSBLOCK_ZERO, CE_ALERT, ip->i_mount,
3745                                 "Access to block zero in inode %llu "
3746                                 "start_block: %llx start_off: %llx "
3747                                 "blkcnt: %llx extent-state: %x lastx: %x\n",
3748                         (unsigned long long)ip->i_ino,
3749                         (unsigned long long)gotp->br_startblock,
3750                         (unsigned long long)gotp->br_startoff,
3751                         (unsigned long long)gotp->br_blockcount,
3752                         gotp->br_state, *lastxp);
3753                 *lastxp = NULLEXTNUM;
3754                 *eofp = 1;
3755                 return NULL;
3756         }
3757         return ep;
3758 }
3759
3760
3761 #ifdef XFS_BMAP_TRACE
3762 ktrace_t        *xfs_bmap_trace_buf;
3763
3764 /*
3765  * Add a bmap trace buffer entry.  Base routine for the others.
3766  */
3767 STATIC void
3768 xfs_bmap_trace_addentry(
3769         int             opcode,         /* operation */
3770         const char      *fname,         /* function name */
3771         char            *desc,          /* operation description */
3772         xfs_inode_t     *ip,            /* incore inode pointer */
3773         xfs_extnum_t    idx,            /* index of entry(ies) */
3774         xfs_extnum_t    cnt,            /* count of entries, 1 or 2 */
3775         xfs_bmbt_rec_host_t *r1,        /* first record */
3776         xfs_bmbt_rec_host_t *r2,        /* second record or null */
3777         int             whichfork)      /* data or attr fork */
3778 {
3779         xfs_bmbt_rec_host_t tr2;
3780
3781         ASSERT(cnt == 1 || cnt == 2);
3782         ASSERT(r1 != NULL);
3783         if (cnt == 1) {
3784                 ASSERT(r2 == NULL);
3785                 r2 = &tr2;
3786                 memset(&tr2, 0, sizeof(tr2));
3787         } else
3788                 ASSERT(r2 != NULL);
3789         ktrace_enter(xfs_bmap_trace_buf,
3790                 (void *)(__psint_t)(opcode | (whichfork << 16)),
3791                 (void *)fname, (void *)desc, (void *)ip,
3792                 (void *)(__psint_t)idx,
3793                 (void *)(__psint_t)cnt,
3794                 (void *)(__psunsigned_t)(ip->i_ino >> 32),
3795                 (void *)(__psunsigned_t)(unsigned)ip->i_ino,
3796                 (void *)(__psunsigned_t)(r1->l0 >> 32),
3797                 (void *)(__psunsigned_t)(unsigned)(r1->l0),
3798                 (void *)(__psunsigned_t)(r1->l1 >> 32),
3799                 (void *)(__psunsigned_t)(unsigned)(r1->l1),
3800                 (void *)(__psunsigned_t)(r2->l0 >> 32),
3801                 (void *)(__psunsigned_t)(unsigned)(r2->l0),
3802                 (void *)(__psunsigned_t)(r2->l1 >> 32),
3803                 (void *)(__psunsigned_t)(unsigned)(r2->l1)
3804                 );
3805         ASSERT(ip->i_xtrace);
3806         ktrace_enter(ip->i_xtrace,
3807                 (void *)(__psint_t)(opcode | (whichfork << 16)),
3808                 (void *)fname, (void *)desc, (void *)ip,
3809                 (void *)(__psint_t)idx,
3810                 (void *)(__psint_t)cnt,
3811                 (void *)(__psunsigned_t)(ip->i_ino >> 32),
3812                 (void *)(__psunsigned_t)(unsigned)ip->i_ino,
3813                 (void *)(__psunsigned_t)(r1->l0 >> 32),
3814                 (void *)(__psunsigned_t)(unsigned)(r1->l0),
3815                 (void *)(__psunsigned_t)(r1->l1 >> 32),
3816                 (void *)(__psunsigned_t)(unsigned)(r1->l1),
3817                 (void *)(__psunsigned_t)(r2->l0 >> 32),
3818                 (void *)(__psunsigned_t)(unsigned)(r2->l0),
3819                 (void *)(__psunsigned_t)(r2->l1 >> 32),
3820                 (void *)(__psunsigned_t)(unsigned)(r2->l1)
3821                 );
3822 }
3823
3824 /*
3825  * Add bmap trace entry prior to a call to xfs_iext_remove.
3826  */
3827 STATIC void
3828 xfs_bmap_trace_delete(
3829         const char      *fname,         /* function name */
3830         char            *desc,          /* operation description */
3831         xfs_inode_t     *ip,            /* incore inode pointer */
3832         xfs_extnum_t    idx,            /* index of entry(entries) deleted */
3833         xfs_extnum_t    cnt,            /* count of entries deleted, 1 or 2 */
3834         int             whichfork)      /* data or attr fork */
3835 {
3836         xfs_ifork_t     *ifp;           /* inode fork pointer */
3837
3838         ifp = XFS_IFORK_PTR(ip, whichfork);
3839         xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_DELETE, fname, desc, ip, idx,
3840                 cnt, xfs_iext_get_ext(ifp, idx),
3841                 cnt == 2 ? xfs_iext_get_ext(ifp, idx + 1) : NULL,
3842                 whichfork);
3843 }
3844
3845 /*
3846  * Add bmap trace entry prior to a call to xfs_iext_insert, or
3847  * reading in the extents list from the disk (in the btree).
3848  */
3849 STATIC void
3850 xfs_bmap_trace_insert(
3851         const char      *fname,         /* function name */
3852         char            *desc,          /* operation description */
3853         xfs_inode_t     *ip,            /* incore inode pointer */
3854         xfs_extnum_t    idx,            /* index of entry(entries) inserted */
3855         xfs_extnum_t    cnt,            /* count of entries inserted, 1 or 2 */
3856         xfs_bmbt_irec_t *r1,            /* inserted record 1 */
3857         xfs_bmbt_irec_t *r2,            /* inserted record 2 or null */
3858         int             whichfork)      /* data or attr fork */
3859 {
3860         xfs_bmbt_rec_host_t tr1;        /* compressed record 1 */
3861         xfs_bmbt_rec_host_t tr2;        /* compressed record 2 if needed */
3862
3863         xfs_bmbt_set_all(&tr1, r1);
3864         if (cnt == 2) {
3865                 ASSERT(r2 != NULL);
3866                 xfs_bmbt_set_all(&tr2, r2);
3867         } else {
3868                 ASSERT(cnt == 1);
3869                 ASSERT(r2 == NULL);
3870         }
3871         xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_INSERT, fname, desc, ip, idx,
3872                 cnt, &tr1, cnt == 2 ? &tr2 : NULL, whichfork);
3873 }
3874
3875 /*
3876  * Add bmap trace entry after updating an extent record in place.
3877  */
3878 STATIC void
3879 xfs_bmap_trace_post_update(
3880         const char      *fname,         /* function name */
3881         char            *desc,          /* operation description */
3882         xfs_inode_t     *ip,            /* incore inode pointer */
3883         xfs_extnum_t    idx,            /* index of entry updated */
3884         int             whichfork)      /* data or attr fork */
3885 {
3886         xfs_ifork_t     *ifp;           /* inode fork pointer */
3887
3888         ifp = XFS_IFORK_PTR(ip, whichfork);
3889         xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_POST_UP, fname, desc, ip, idx,
3890                 1, xfs_iext_get_ext(ifp, idx), NULL, whichfork);
3891 }
3892
3893 /*
3894  * Add bmap trace entry prior to updating an extent record in place.
3895  */
3896 STATIC void
3897 xfs_bmap_trace_pre_update(
3898         const char      *fname,         /* function name */
3899         char            *desc,          /* operation description */
3900         xfs_inode_t     *ip,            /* incore inode pointer */
3901         xfs_extnum_t    idx,            /* index of entry to be updated */
3902         int             whichfork)      /* data or attr fork */
3903 {
3904         xfs_ifork_t     *ifp;           /* inode fork pointer */
3905
3906         ifp = XFS_IFORK_PTR(ip, whichfork);
3907         xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_PRE_UP, fname, desc, ip, idx, 1,
3908                 xfs_iext_get_ext(ifp, idx), NULL, whichfork);
3909 }
3910 #endif  /* XFS_BMAP_TRACE */
3911
3912 /*
3913  * Compute the worst-case number of indirect blocks that will be used
3914  * for ip's delayed extent of length "len".
3915  */
3916 STATIC xfs_filblks_t
3917 xfs_bmap_worst_indlen(
3918         xfs_inode_t     *ip,            /* incore inode pointer */
3919         xfs_filblks_t   len)            /* delayed extent length */
3920 {
3921         int             level;          /* btree level number */
3922         int             maxrecs;        /* maximum record count at this level */
3923         xfs_mount_t     *mp;            /* mount structure */
3924         xfs_filblks_t   rval;           /* return value */
3925
3926         mp = ip->i_mount;
3927         maxrecs = mp->m_bmap_dmxr[0];
3928         for (level = 0, rval = 0;
3929              level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
3930              level++) {
3931                 len += maxrecs - 1;
3932                 do_div(len, maxrecs);
3933                 rval += len;
3934                 if (len == 1)
3935                         return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
3936                                 level - 1;
3937                 if (level == 0)
3938                         maxrecs = mp->m_bmap_dmxr[1];
3939         }
3940         return rval;
3941 }
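
/*
 * Illustrative, standalone sketch of the worst-case calculation above.
 * Starting from a delayed extent of "len" blocks, each pass divides by the
 * per-block record capacity (rounding up) to get the number of blocks needed
 * at the next btree level, and all of those blocks are charged to the
 * reservation.  The record capacities and level limit below are made-up
 * example numbers, not the values XFS derives from the filesystem geometry.
 */
#define DEMO_MAXLEVELS          5
#define DEMO_LEAF_MAXRECS       9       /* records per leaf block (example) */
#define DEMO_NODE_MAXRECS       10      /* keys/ptrs per node block (example) */

static unsigned long long
demo_worst_indlen(
        unsigned long long      len)    /* delayed extent length, in blocks */
{
        unsigned long long      rval = 0;
        int                     maxrecs = DEMO_LEAF_MAXRECS;
        int                     level;

        for (level = 0; level < DEMO_MAXLEVELS; level++) {
                len = (len + maxrecs - 1) / maxrecs;    /* blocks at this level */
                rval += len;
                if (len == 1)   /* one block holds the rest of the tree */
                        return rval + DEMO_MAXLEVELS - level - 1;
                if (level == 0)
                        maxrecs = DEMO_NODE_MAXRECS;
        }
        return rval;
}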
3942
3943 #if defined(XFS_RW_TRACE)
3944 STATIC void
3945 xfs_bunmap_trace(
3946         xfs_inode_t             *ip,
3947         xfs_fileoff_t           bno,
3948         xfs_filblks_t           len,
3949         int                     flags,
3950         inst_t                  *ra)
3951 {
3952         if (ip->i_rwtrace == NULL)
3953                 return;
3954         ktrace_enter(ip->i_rwtrace,
3955                 (void *)(__psint_t)XFS_BUNMAP,
3956                 (void *)ip,
3957                 (void *)(__psint_t)((ip->i_d.di_size >> 32) & 0xffffffff),
3958                 (void *)(__psint_t)(ip->i_d.di_size & 0xffffffff),
3959                 (void *)(__psint_t)(((xfs_dfiloff_t)bno >> 32) & 0xffffffff),
3960                 (void *)(__psint_t)((xfs_dfiloff_t)bno & 0xffffffff),
3961                 (void *)(__psint_t)len,
3962                 (void *)(__psint_t)flags,
3963                 (void *)(unsigned long)current_cpu(),
3964                 (void *)ra,
3965                 (void *)0,
3966                 (void *)0,
3967                 (void *)0,
3968                 (void *)0,
3969                 (void *)0,
3970                 (void *)0);
3971 }
3972 #endif
3973
3974 /*
3975  * Convert inode from non-attributed to attributed.
3976  * Must not be in a transaction, ip must not be locked.
3977  */
3978 int                                             /* error code */
3979 xfs_bmap_add_attrfork(
3980         xfs_inode_t             *ip,            /* incore inode pointer */
3981         int                     size,           /* space new attribute needs */
3982         int                     rsvd)           /* xact may use reserved blks */
3983 {
3984         xfs_fsblock_t           firstblock;     /* 1st block/ag allocated */
3985         xfs_bmap_free_t         flist;          /* freed extent records */
3986         xfs_mount_t             *mp;            /* mount structure */
3987         xfs_trans_t             *tp;            /* transaction pointer */
3988         int                     blks;           /* space reservation */
3989         int                     version = 1;    /* superblock attr version */
3990         int                     committed;      /* xaction was committed */
3991         int                     logflags;       /* logging flags */
3992         int                     error;          /* error return value */
3993
3994         ASSERT(XFS_IFORK_Q(ip) == 0);
3995         ASSERT(ip->i_df.if_ext_max ==
3996                XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
3997
3998         mp = ip->i_mount;
3999         ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
4000         tp = xfs_trans_alloc(mp, XFS_TRANS_ADDAFORK);
4001         blks = XFS_ADDAFORK_SPACE_RES(mp);
4002         if (rsvd)
4003                 tp->t_flags |= XFS_TRANS_RESERVE;
4004         if ((error = xfs_trans_reserve(tp, blks, XFS_ADDAFORK_LOG_RES(mp), 0,
4005                         XFS_TRANS_PERM_LOG_RES, XFS_ADDAFORK_LOG_COUNT)))
4006                 goto error0;
4007         xfs_ilock(ip, XFS_ILOCK_EXCL);
4008         error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
4009                         XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
4010                         XFS_QMOPT_RES_REGBLKS);
4011         if (error) {
4012                 xfs_iunlock(ip, XFS_ILOCK_EXCL);
4013                 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES);
4014                 return error;
4015         }
4016         if (XFS_IFORK_Q(ip))
4017                 goto error1;
4018         if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
4019                 /*
4020                  * For inodes coming from pre-6.2 filesystems.
4021                  */
4022                 ASSERT(ip->i_d.di_aformat == 0);
4023                 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
4024         }
4025         ASSERT(ip->i_d.di_anextents == 0);
4026         IHOLD(ip);
4027         xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
4028         xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
4029         switch (ip->i_d.di_format) {
4030         case XFS_DINODE_FMT_DEV:
4031                 ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
4032                 break;
4033         case XFS_DINODE_FMT_UUID:
4034                 ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
4035                 break;
4036         case XFS_DINODE_FMT_LOCAL:
4037         case XFS_DINODE_FMT_EXTENTS:
4038         case XFS_DINODE_FMT_BTREE:
4039                 ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
4040                 if (!ip->i_d.di_forkoff)
4041                         ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
4042                 else if (mp->m_flags & XFS_MOUNT_ATTR2)
4043                         version = 2;
4044                 break;
4045         default:
4046                 ASSERT(0);
4047                 error = XFS_ERROR(EINVAL);
4048                 goto error1;
4049         }
4050         ip->i_df.if_ext_max =
4051                 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
4052         ASSERT(ip->i_afp == NULL);
4053         ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
4054         ip->i_afp->if_ext_max =
4055                 XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
4056         ip->i_afp->if_flags = XFS_IFEXTENTS;
4057         logflags = 0;
4058         xfs_bmap_init(&flist, &firstblock);
4059         switch (ip->i_d.di_format) {
4060         case XFS_DINODE_FMT_LOCAL:
4061                 error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist,
4062                         &logflags);
4063                 break;
4064         case XFS_DINODE_FMT_EXTENTS:
4065                 error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
4066                         &flist, &logflags);
4067                 break;
4068         case XFS_DINODE_FMT_BTREE:
4069                 error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &flist,
4070                         &logflags);
4071                 break;
4072         default:
4073                 error = 0;
4074                 break;
4075         }
4076         if (logflags)
4077                 xfs_trans_log_inode(tp, ip, logflags);
4078         if (error)
4079                 goto error2;
4080         if (!xfs_sb_version_hasattr(&mp->m_sb) ||
4081            (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
4082                 __int64_t sbfields = 0;
4083
4084                 spin_lock(&mp->m_sb_lock);
4085                 if (!xfs_sb_version_hasattr(&mp->m_sb)) {
4086                         xfs_sb_version_addattr(&mp->m_sb);
4087                         sbfields |= XFS_SB_VERSIONNUM;
4088                 }
4089                 if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
4090                         xfs_sb_version_addattr2(&mp->m_sb);
4091                         sbfields |= (XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
4092                 }
4093                 if (sbfields) {
4094                         spin_unlock(&mp->m_sb_lock);
4095                         xfs_mod_sb(tp, sbfields);
4096                 } else
4097                         spin_unlock(&mp->m_sb_lock);
4098         }
4099         if ((error = xfs_bmap_finish(&tp, &flist, &committed)))
4100                 goto error2;
4101         error = xfs_trans_commit(tp, XFS_TRANS_PERM_LOG_RES);
4102         ASSERT(ip->i_df.if_ext_max ==
4103                XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
4104         return error;
4105 error2:
4106         xfs_bmap_cancel(&flist);
4107 error1:
4108         xfs_iunlock(ip, XFS_ILOCK_EXCL);
4109 error0:
4110         xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
4111         ASSERT(ip->i_df.if_ext_max ==
4112                XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
4113         return error;
4114 }
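
/*
 * Small standalone sketch of the di_forkoff arithmetic used above:
 * di_forkoff is kept in units of 8 bytes, so a byte count is rounded up to
 * an 8-byte boundary and then shifted right by 3.  The example byte counts
 * in the comment below are illustrative; the real inputs come from
 * xfs_dev_t, uuid_t or xfs_attr_shortform_bytesfit().
 */
static unsigned int
demo_forkoff_from_bytes(
        unsigned int    bytes)  /* bytes used at the front of the literal area */
{
        return ((bytes + 7) & ~7u) >> 3;        /* roundup(bytes, 8) >> 3 */
}

/*
 * e.g. demo_forkoff_from_bytes(4)  == 1  (attr fork starts 8 bytes in)
 *      demo_forkoff_from_bytes(16) == 2  (attr fork starts 16 bytes in)
 */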
4115
4116 /*
4117  * Add the extent to the list of extents to be freed at transaction end.
4118  * The list is maintained sorted (by block number).
4119  */
4120 /* ARGSUSED */
4121 void
4122 xfs_bmap_add_free(
4123         xfs_fsblock_t           bno,            /* fs block number of extent */
4124         xfs_filblks_t           len,            /* length of extent */
4125         xfs_bmap_free_t         *flist,         /* list of extents */
4126         xfs_mount_t             *mp)            /* mount point structure */
4127 {
4128         xfs_bmap_free_item_t    *cur;           /* current (next) element */
4129         xfs_bmap_free_item_t    *new;           /* new element */
4130         xfs_bmap_free_item_t    *prev;          /* previous element */
4131 #ifdef DEBUG
4132         xfs_agnumber_t          agno;
4133         xfs_agblock_t           agbno;
4134
4135         ASSERT(bno != NULLFSBLOCK);
4136         ASSERT(len > 0);
4137         ASSERT(len <= MAXEXTLEN);
4138         ASSERT(!isnullstartblock(bno));
4139         agno = XFS_FSB_TO_AGNO(mp, bno);
4140         agbno = XFS_FSB_TO_AGBNO(mp, bno);
4141         ASSERT(agno < mp->m_sb.sb_agcount);
4142         ASSERT(agbno < mp->m_sb.sb_agblocks);
4143         ASSERT(len < mp->m_sb.sb_agblocks);
4144         ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
4145 #endif
4146         ASSERT(xfs_bmap_free_item_zone != NULL);
4147         new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
4148         new->xbfi_startblock = bno;
4149         new->xbfi_blockcount = (xfs_extlen_t)len;
4150         for (prev = NULL, cur = flist->xbf_first;
4151              cur != NULL;
4152              prev = cur, cur = cur->xbfi_next) {
4153                 if (cur->xbfi_startblock >= bno)
4154                         break;
4155         }
4156         if (prev)
4157                 prev->xbfi_next = new;
4158         else
4159                 flist->xbf_first = new;
4160         new->xbfi_next = cur;
4161         flist->xbf_count++;
4162 }
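
/*
 * Standalone sketch of the sorted insertion above: walk the singly linked
 * free list to the first element whose start block is >= the new one, then
 * splice the new element in ahead of it.  The names and types are
 * illustrative stand-ins for the xfs_bmap_free_item_t list.
 */
struct demo_free_item {
        unsigned long long      startblock;
        unsigned int            blockcount;
        struct demo_free_item   *next;
};

struct demo_free_list {
        struct demo_free_item   *first;
        int                     count;
};

static void
demo_add_free(
        struct demo_free_list   *list,
        struct demo_free_item   *new)   /* caller-allocated element */
{
        struct demo_free_item   *prev = NULL;
        struct demo_free_item   *cur;

        for (cur = list->first; cur != NULL; prev = cur, cur = cur->next) {
                if (cur->startblock >= new->startblock)
                        break;
        }
        if (prev)
                prev->next = new;
        else
                list->first = new;
        new->next = cur;
        list->count++;
}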
4163
4164 /*
4165  * Compute and fill in the value of the maximum depth of a bmap btree
4166  * in this filesystem.  Done once, during mount.
4167  */
4168 void
4169 xfs_bmap_compute_maxlevels(
4170         xfs_mount_t     *mp,            /* file system mount structure */
4171         int             whichfork)      /* data or attr fork */
4172 {
4173         int             level;          /* btree level */
4174         uint            maxblocks;      /* max blocks at this level */
4175         uint            maxleafents;    /* max leaf entries possible */
4176         int             maxrootrecs;    /* max records in root block */
4177         int             minleafrecs;    /* min records in leaf block */
4178         int             minnoderecs;    /* min records in node block */
4179         int             sz;             /* root block size */
4180
4181         /*
4182          * The maximum number of extents in a file, hence the maximum
4183          * number of leaf entries, is controlled by the type of di_nextents
4184          * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
4185          * (a signed 16-bit number, xfs_aextnum_t).
4186          *
4187          * Note that we can no longer assume that if we are in ATTR1 that
4188          * the fork offset of all the inodes will be
4189          * (xfs_default_attroffset(ip) >> 3) because we could have mounted
4190          * with ATTR2 and then mounted back with ATTR1, keeping the
4191          * di_forkoff's fixed but probably at various positions. Therefore,
4192          * for both ATTR1 and ATTR2 we have to assume the worst case scenario
4193          * of a minimum size available.
4194          */
4195         if (whichfork == XFS_DATA_FORK) {
4196                 maxleafents = MAXEXTNUM;
4197                 sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
4198         } else {
4199                 maxleafents = MAXAEXTNUM;
4200                 sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
4201         }
4202         maxrootrecs = xfs_bmdr_maxrecs(mp, sz, 0);
4203         minleafrecs = mp->m_bmap_dmnr[0];
4204         minnoderecs = mp->m_bmap_dmnr[1];
4205         maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
4206         for (level = 1; maxblocks > 1; level++) {
4207                 if (maxblocks <= maxrootrecs)
4208                         maxblocks = 1;
4209                 else
4210                         maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
4211         }
4212         mp->m_bm_maxlevels[whichfork] = level;
4213 }
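
/*
 * Standalone sketch of the level calculation above: start with the number of
 * blocks needed to hold the worst-case number of leaf records at minimum
 * fanout, then repeatedly divide by the minimum node fanout until either a
 * level fits in the inode root or only one block remains.  The inputs are
 * example parameters, not values derived from a real superblock.
 */
static int
demo_compute_maxlevels(
        unsigned int    maxleafents,    /* worst-case number of extent records */
        int             minleafrecs,    /* minimum records per leaf block */
        int             minnoderecs,    /* minimum records per node block */
        int             maxrootrecs)    /* records that fit in the inode root */
{
        unsigned int    maxblocks;
        int             level;

        maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
        for (level = 1; maxblocks > 1; level++) {
                if (maxblocks <= (unsigned int)maxrootrecs)
                        maxblocks = 1;  /* the root in the inode covers it */
                else
                        maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
        }
        return level;
}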
4214
4215 /*
4216  * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
4217  * caller.  Frees all the extents that need freeing, which must be done
4218  * last due to locking considerations.  We never free any extents in
4219  * the first transaction.  This is to allow the caller to make the first
4220  * transaction a synchronous one so that the pointers to the data being
4221  * broken in this transaction will be permanent before the data is actually
4222  * freed.  This is necessary to prevent blocks from being reallocated
4223  * and written to before the free and reallocation are actually permanent.
4224  * We do not just make the first transaction synchronous here, because
4225  * there are more efficient ways to gain the same protection in some cases
4226  * (see the file truncation code).
4227  *
4228  * Set *committed to 1 if the given transaction was committed and a new one
4229  * started, and to 0 otherwise.
4230  */
4231 /*ARGSUSED*/
4232 int                                             /* error */
4233 xfs_bmap_finish(
4234         xfs_trans_t             **tp,           /* transaction pointer addr */
4235         xfs_bmap_free_t         *flist,         /* i/o: list extents to free */
4236         int                     *committed)     /* xact committed or not */
4237 {
4238         xfs_efd_log_item_t      *efd;           /* extent free data */
4239         xfs_efi_log_item_t      *efi;           /* extent free intention */
4240         int                     error;          /* error return value */
4241         xfs_bmap_free_item_t    *free;          /* free extent item */
4242         unsigned int            logres;         /* new log reservation */
4243         unsigned int            logcount;       /* new log count */
4244         xfs_mount_t             *mp;            /* filesystem mount structure */
4245         xfs_bmap_free_item_t    *next;          /* next item on free list */
4246         xfs_trans_t             *ntp;           /* new transaction pointer */
4247
4248         ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
4249         if (flist->xbf_count == 0) {
4250                 *committed = 0;
4251                 return 0;
4252         }
4253         ntp = *tp;
4254         efi = xfs_trans_get_efi(ntp, flist->xbf_count);
4255         for (free = flist->xbf_first; free; free = free->xbfi_next)
4256                 xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock,
4257                         free->xbfi_blockcount);
4258         logres = ntp->t_log_res;
4259         logcount = ntp->t_log_count;
4260         ntp = xfs_trans_dup(*tp);
4261         error = xfs_trans_commit(*tp, 0);
4262         *tp = ntp;
4263         *committed = 1;
4264         /*
4265          * We have a new transaction, so we should return committed=1,
4266          * even though we're returning an error.
4267          */
4268         if (error)
4269                 return error;
4270
4271         /*
4272          * transaction commit worked ok so we can drop the extra ticket
4273          * reference that we gained in xfs_trans_dup()
4274          */
4275         xfs_log_ticket_put(ntp->t_ticket);
4276
4277         if ((error = xfs_trans_reserve(ntp, 0, logres, 0, XFS_TRANS_PERM_LOG_RES,
4278                         logcount)))
4279                 return error;
4280         efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count);
4281         for (free = flist->xbf_first; free != NULL; free = next) {
4282                 next = free->xbfi_next;
4283                 if ((error = xfs_free_extent(ntp, free->xbfi_startblock,
4284                                 free->xbfi_blockcount))) {
4285                         /*
4286                          * The bmap free list will be cleaned up at a
4287                          * higher level.  The EFI will be canceled when
4288                          * this transaction is aborted.
4289                          * Need to force shutdown here to make sure it
4290                          * happens, since this transaction may not be
4291                          * dirty yet.
4292                          */
4293                         mp = ntp->t_mountp;
4294                         if (!XFS_FORCED_SHUTDOWN(mp))
4295                                 xfs_force_shutdown(mp,
4296                                                    (error == EFSCORRUPTED) ?
4297                                                    SHUTDOWN_CORRUPT_INCORE :
4298                                                    SHUTDOWN_META_IO_ERROR);
4299                         return error;
4300                 }
4301                 xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock,
4302                         free->xbfi_blockcount);
4303                 xfs_bmap_del_free(flist, NULL, free);
4304         }
4305         return 0;
4306 }
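
/*
 * Control-flow sketch of the pattern described above, with stand-in stubs:
 * log an intent (EFI) covering every queued extent in the caller's
 * transaction, duplicate and commit that transaction, re-reserve log space
 * in the new one, then free each extent and log the matching "done" (EFD)
 * record.  The demo_* helpers are placeholders that do no real work; only
 * the ordering of the steps is the point of this sketch.
 */
struct demo_txn { int dummy; };

static void demo_log_intent(struct demo_txn *tp)       { (void)tp; }
static void demo_log_done(struct demo_txn *tp)         { (void)tp; }
static int  demo_commit(struct demo_txn *tp)           { (void)tp; return 0; }
static int  demo_reserve(struct demo_txn *tp)          { (void)tp; return 0; }
static int  demo_free_one_extent(struct demo_txn *tp)  { (void)tp; return 0; }

static int
demo_finish(
        struct demo_txn **tpp,          /* in/out: caller's transaction */
        struct demo_txn *newtp,         /* pre-built duplicate transaction */
        int             nextents,       /* number of queued extents to free */
        int             *committed)     /* out: original transaction committed */
{
        int             error;
        int             i;

        demo_log_intent(*tpp);          /* EFI: promise to free the extents */
        error = demo_commit(*tpp);      /* make the bmap changes permanent */
        *tpp = newtp;                   /* caller continues in the new txn */
        *committed = 1;
        if (error)
                return error;
        if ((error = demo_reserve(newtp)))      /* fresh log reservation */
                return error;
        for (i = 0; i < nextents; i++) {
                if ((error = demo_free_one_extent(newtp)))
                        return error;   /* real code also forces a shutdown */
        }
        demo_log_done(newtp);           /* EFD: the promise was kept */
        return 0;
}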
4307
4308 /*
4309  * Free up any items left in the list.
4310  */
4311 void
4312 xfs_bmap_cancel(
4313         xfs_bmap_free_t         *flist) /* list of bmap_free_items */
4314 {
4315         xfs_bmap_free_item_t    *free;  /* free list item */
4316         xfs_bmap_free_item_t    *next;
4317
4318         if (flist->xbf_count == 0)
4319                 return;
4320         ASSERT(flist->xbf_first != NULL);
4321         for (free = flist->xbf_first; free; free = next) {
4322                 next = free->xbfi_next;
4323                 xfs_bmap_del_free(flist, NULL, free);
4324         }
4325         ASSERT(flist->xbf_count == 0);
4326 }
4327
4328 /*
4329  * Returns the file-relative block number of the first unused block(s)
4330  * in the file with at least "len" logically contiguous blocks free.
4331  * This is the lowest-address hole if the file has holes, else the first block
4332  * past the end of file.
4333  * Return 0 if the file is currently local (in-inode).
4334  */
4335 int                                             /* error */
4336 xfs_bmap_first_unused(
4337         xfs_trans_t     *tp,                    /* transaction pointer */
4338         xfs_inode_t     *ip,                    /* incore inode */
4339         xfs_extlen_t    len,                    /* size of hole to find */
4340         xfs_fileoff_t   *first_unused,          /* unused block */
4341         int             whichfork)              /* data or attr fork */
4342 {
4343         int             error;                  /* error return value */
4344         int             idx;                    /* extent record index */
4345         xfs_ifork_t     *ifp;                   /* inode fork pointer */
4346         xfs_fileoff_t   lastaddr;               /* last block number seen */
4347         xfs_fileoff_t   lowest;                 /* lowest useful block */
4348         xfs_fileoff_t   max;                    /* starting useful block */
4349         xfs_fileoff_t   off;                    /* offset for this block */
4350         xfs_extnum_t    nextents;               /* number of extent entries */
4351
4352         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
4353                XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
4354                XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
4355         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4356                 *first_unused = 0;
4357                 return 0;
4358         }
4359         ifp = XFS_IFORK_PTR(ip, whichfork);
4360         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
4361             (error = xfs_iread_extents(tp, ip, whichfork)))
4362                 return error;
4363         lowest = *first_unused;
4364         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4365         for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
4366                 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
4367                 off = xfs_bmbt_get_startoff(ep);
4368                 /*
4369                  * See if the hole before this extent will work.
4370                  */
4371                 if (off >= lowest + len && off - max >= len) {
4372                         *first_unused = max;
4373                         return 0;
4374                 }
4375                 lastaddr = off + xfs_bmbt_get_blockcount(ep);
4376                 max = XFS_FILEOFF_MAX(lastaddr, lowest);
4377         }
4378         *first_unused = max;
4379         return 0;
4380 }
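
/*
 * Standalone sketch of the hole search above: walk extent records sorted by
 * file offset, tracking "max", the first block past everything seen so far
 * (never below the caller's lowest acceptable offset).  The first gap of at
 * least "len" blocks between "max" and the next extent's start is the
 * answer; if no such gap exists, the hole begins just past the last extent.
 * The record type is an illustrative stand-in, as in the earlier sketches.
 */
struct demo_hrec {
        unsigned long long      startoff;
        unsigned long long      blockcount;
};

static unsigned long long               /* first unused file block found */
demo_first_unused(
        struct demo_hrec        *recs,          /* sorted by startoff */
        int                     nextents,
        unsigned long long      len,            /* size of hole wanted, in blocks */
        unsigned long long      lowest)         /* don't look below this offset */
{
        unsigned long long      max = lowest;
        int                     i;

        for (i = 0; i < nextents; i++) {
                unsigned long long      off = recs[i].startoff;

                if (off >= lowest + len && off - max >= len)
                        return max;     /* the hole before this extent fits */
                if (off + recs[i].blockcount > max)
                        max = off + recs[i].blockcount;
        }
        return max;                     /* hole starts past the last extent */
}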
4381
4382 /*
4383  * Returns the file-relative block number of the last block + 1 before
4384  * last_block (input value) in the file.
4385  * This is not based on i_size, it is based on the extent records.
4386  * Returns 0 for local files, as they do not have extent records.
4387  */
4388 int                                             /* error */
4389 xfs_bmap_last_before(
4390         xfs_trans_t     *tp,                    /* transaction pointer */
4391         xfs_inode_t     *ip,                    /* incore inode */
4392         xfs_fileoff_t   *last_block,            /* last block */
4393         int             whichfork)              /* data or attr fork */
4394 {
4395         xfs_fileoff_t   bno;                    /* input file offset */
4396         int             eof;                    /* hit end of file */
4397         xfs_bmbt_rec_host_t *ep;                /* pointer to last extent */
4398         int             error;                  /* error return value */
4399         xfs_bmbt_irec_t got;                    /* current extent value */
4400         xfs_ifork_t     *ifp;                   /* inode fork pointer */
4401         xfs_extnum_t    lastx;                  /* last extent used */
4402         xfs_bmbt_irec_t prev;                   /* previous extent value */
4403
4404         if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
4405             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4406             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
4407                return XFS_ERROR(EIO);
4408         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4409                 *last_block = 0;
4410                 return 0;
4411         }
4412         ifp = XFS_IFORK_PTR(ip, whichfork);
4413         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
4414             (error = xfs_iread_extents(tp, ip, whichfork)))
4415                 return error;
4416         bno = *last_block - 1;
4417         ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
4418                 &prev);
4419         if (eof || xfs_bmbt_get_startoff(ep) > bno) {
4420                 if (prev.br_startoff == NULLFILEOFF)
4421                         *last_block = 0;
4422                 else
4423                         *last_block = prev.br_startoff + prev.br_blockcount;
4424         }
4425         /*
4426          * Otherwise *last_block is already the right answer.
4427          */
4428         return 0;
4429 }
4430
4431 /*
4432  * Returns the file-relative block number of the first block past eof in
4433  * the file.  This is not based on i_size, it is based on the extent records.
4434  * Returns 0 for local files, as they do not have extent records.
4435  */
4436 int                                             /* error */
4437 xfs_bmap_last_offset(
4438         xfs_trans_t     *tp,                    /* transaction pointer */
4439         xfs_inode_t     *ip,                    /* incore inode */
4440         xfs_fileoff_t   *last_block,            /* last block */
4441         int             whichfork)              /* data or attr fork */
4442 {
4443         xfs_bmbt_rec_host_t *ep;                /* pointer to last extent */
4444         int             error;                  /* error return value */
4445         xfs_ifork_t     *ifp;                   /* inode fork pointer */
4446         xfs_extnum_t    nextents;               /* number of extent entries */
4447
4448         if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
4449             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4450             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
4451                return XFS_ERROR(EIO);
4452         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4453                 *last_block = 0;
4454                 return 0;
4455         }
4456         ifp = XFS_IFORK_PTR(ip, whichfork);
4457         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
4458             (error = xfs_iread_extents(tp, ip, whichfork)))
4459                 return error;
4460         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4461         if (!nextents) {
4462                 *last_block = 0;
4463                 return 0;
4464         }
4465         ep = xfs_iext_get_ext(ifp, nextents - 1);
4466         *last_block = xfs_bmbt_get_startoff(ep) + xfs_bmbt_get_blockcount(ep);
4467         return 0;
4468 }
4469
4470 /*
4471  * Returns whether the selected fork of the inode has exactly one
4472  * block or not.  For the data fork we check this matches di_size,
4473  * implying the file's range is 0..bsize-1.
4474  */
4475 int                                     /* 1=>1 block, 0=>otherwise */
4476 xfs_bmap_one_block(
4477         xfs_inode_t     *ip,            /* incore inode */
4478         int             whichfork)      /* data or attr fork */
4479 {
4480         xfs_bmbt_rec_host_t *ep;        /* ptr to fork's extent */
4481         xfs_ifork_t     *ifp;           /* inode fork pointer */
4482         int             rval;           /* return value */
4483         xfs_bmbt_irec_t s;              /* internal version of extent */
4484
4485 #ifndef DEBUG
4486         if (whichfork == XFS_DATA_FORK) {
4487                 return ((ip->i_d.di_mode & S_IFMT) == S_IFREG) ?
4488                         (ip->i_size == ip->i_mount->m_sb.sb_blocksize) :
4489                         (ip->i_d.di_size == ip->i_mount->m_sb.sb_blocksize);
4490         }
4491 #endif  /* !DEBUG */
4492         if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
4493                 return 0;
4494         if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
4495                 return 0;
4496         ifp = XFS_IFORK_PTR(ip, whichfork);
4497         ASSERT(ifp->if_flags & XFS_IFEXTENTS);
4498         ep = xfs_iext_get_ext(ifp, 0);
4499         xfs_bmbt_get_all(ep, &s);
4500         rval = s.br_startoff == 0 && s.br_blockcount == 1;
4501         if (rval && whichfork == XFS_DATA_FORK)
4502                 ASSERT(ip->i_size == ip->i_mount->m_sb.sb_blocksize);
4503         return rval;
4504 }
4505
4506 STATIC int
4507 xfs_bmap_sanity_check(
4508         struct xfs_mount        *mp,
4509         struct xfs_buf          *bp,
4510         int                     level)
4511 {
4512         struct xfs_btree_block  *block = XFS_BUF_TO_BLOCK(bp);
4513
4514         if (be32_to_cpu(block->bb_magic) != XFS_BMAP_MAGIC ||
4515             be16_to_cpu(block->bb_level) != level ||
4516             be16_to_cpu(block->bb_numrecs) == 0 ||
4517             be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0])
4518                 return 0;
4519         return 1;
4520 }
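
/*
 * Standalone sketch of the style of check above: a btree block header is
 * accepted only if its magic number, level and record count all look
 * plausible.  The header layout and the limits passed in are invented
 * example values, not the XFS on-disk format.
 */
struct demo_block_hdr {
        unsigned int    magic;
        unsigned short  level;          /* 0 for leaf blocks */
        unsigned short  numrecs;
};

static int                              /* 1 if the header looks sane */
demo_block_sanity_check(
        struct demo_block_hdr   *hdr,
        unsigned int            expected_magic,
        int                     expected_level,
        int                     maxrecs)        /* record limit for this level */
{
        if (hdr->magic != expected_magic)
                return 0;
        if (hdr->level != expected_level)
                return 0;
        if (hdr->numrecs == 0 || hdr->numrecs > maxrecs)
                return 0;
        return 1;
}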
4521
4522 /*
4523  * Read in the extents to if_extents.
4524  * All inode fields are set up by caller, we just traverse the btree
4525  * and copy the records in. If the file system cannot contain unwritten
4526  * extents, the records are checked to ensure no "state" flags are set.
4527  */
4528 int                                     /* error */
4529 xfs_bmap_read_extents(
4530         xfs_trans_t             *tp,    /* transaction pointer */
4531         xfs_inode_t             *ip,    /* incore inode */
4532         int                     whichfork) /* data or attr fork */
4533 {
4534         struct xfs_btree_block  *block; /* current btree block */
4535         xfs_fsblock_t           bno;    /* block # of "block" */
4536         xfs_buf_t               *bp;    /* buffer for "block" */
4537         int                     error;  /* error return value */
4538         xfs_exntfmt_t           exntf;  /* XFS_EXTFMT_NOSTATE, if checking */
4539         xfs_extnum_t            i, j;   /* index into the extents list */
4540         xfs_ifork_t             *ifp;   /* fork structure */
4541         int                     level;  /* btree level, for checking */
4542         xfs_mount_t             *mp;    /* file system mount structure */
4543         __be64                  *pp;    /* pointer to block address */
4544         /* REFERENCED */
4545         xfs_extnum_t            room;   /* number of entries there's room for */
4546
4547         bno = NULLFSBLOCK;
4548         mp = ip->i_mount;
4549         ifp = XFS_IFORK_PTR(ip, whichfork);
4550         exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
4551                                         XFS_EXTFMT_INODE(ip);
4552         block = ifp->if_broot;
4553         /*
4554          * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
4555          */
4556         level = be16_to_cpu(block->bb_level);
4557         ASSERT(level > 0);
4558         pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
4559         bno = be64_to_cpu(*pp);
4560         ASSERT(bno != NULLDFSBNO);
4561         ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
4562         ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
4563         /*
4564          * Go down the tree until leaf level is reached, following the first
4565          * pointer (leftmost) at each level.
4566          */
4567         while (level-- > 0) {
4568                 if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
4569                                 XFS_BMAP_BTREE_REF)))
4570                         return error;
4571                 block = XFS_BUF_TO_BLOCK(bp);
4572                 XFS_WANT_CORRUPTED_GOTO(
4573                         xfs_bmap_sanity_check(mp, bp, level),
4574                         error0);
4575                 if (level == 0)
4576                         break;
4577                 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
4578                 bno = be64_to_cpu(*pp);
4579                 XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
4580                 xfs_trans_brelse(tp, bp);
4581         }
4582         /*
4583          * Here with bp and block set to the leftmost leaf node in the tree.
4584          */
4585         room = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4586         i = 0;
4587         /*
4588          * Loop over all leaf nodes.  Copy information to the extent records.
4589          */
4590         for (;;) {
4591                 xfs_bmbt_rec_t  *frp;
4592                 xfs_fsblock_t   nextbno;
4593                 xfs_extnum_t    num_recs;
4594                 xfs_extnum_t    start;
4595
4596
4597                 num_recs = xfs_btree_get_numrecs(block);
4598                 if (unlikely(i + num_recs > room)) {
4599                         ASSERT(i + num_recs <= room);
4600                         xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
4601                                 "corrupt dinode %Lu, (btree extents).",
4602                                 (unsigned long long) ip->i_ino);
4603                         XFS_ERROR_REPORT("xfs_bmap_read_extents(1)",
4604                                          XFS_ERRLEVEL_LOW,
4605                                         ip->i_mount);
4606                         goto error0;
4607                 }
4608                 XFS_WANT_CORRUPTED_GOTO(
4609                         xfs_bmap_sanity_check(mp, bp, 0),
4610                         error0);
4611                 /*
4612                  * Read-ahead the next leaf block, if any.
4613                  */
4614                 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
4615                 if (nextbno != NULLFSBLOCK)
4616                         xfs_btree_reada_bufl(mp, nextbno, 1);
4617                 /*
4618                  * Copy records into the extent records.
4619                  */
4620                 frp = XFS_BMBT_REC_ADDR(mp, block, 1);
4621                 start = i;
4622                 for (j = 0; j < num_recs; j++, i++, frp++) {
4623                         xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
4624                         trp->l0 = be64_to_cpu(frp->l0);
4625                         trp->l1 = be64_to_cpu(frp->l1);
4626                 }
4627                 if (exntf == XFS_EXTFMT_NOSTATE) {
4628                         /*
4629                          * Check all attribute bmap btree records and
4630                          * any "older" data bmap btree records for a
4631                          * set bit in the "extent flag" position.
4632                          */
4633                         if (unlikely(xfs_check_nostate_extents(ifp,
4634                                         start, num_recs))) {
4635                                 XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
4636                                                  XFS_ERRLEVEL_LOW,
4637                                                  ip->i_mount);
4638                                 goto error0;
4639                         }
4640                 }
4641                 xfs_trans_brelse(tp, bp);
4642                 bno = nextbno;
4643                 /*
4644                  * If we've reached the end, stop.
4645                  */
4646                 if (bno == NULLFSBLOCK)
4647                         break;
4648                 if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
4649                                 XFS_BMAP_BTREE_REF)))
4650                         return error;
4651                 block = XFS_BUF_TO_BLOCK(bp);
4652         }
4653         ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
4654         ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork));
4655         XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
4656         return 0;
4657 error0:
4658         xfs_trans_brelse(tp, bp);
4659         return XFS_ERROR(EFSCORRUPTED);
4660 }
4661
4662 #ifdef XFS_BMAP_TRACE
4663 /*
4664  * Add bmap trace insert entries for all the contents of the extent records.
4665  */
4666 void
4667 xfs_bmap_trace_exlist(
4668         const char      *fname,         /* function name */
4669         xfs_inode_t     *ip,            /* incore inode pointer */
4670         xfs_extnum_t    cnt,            /* count of entries in the list */
4671         int             whichfork)      /* data or attr fork */
4672 {
4673         xfs_bmbt_rec_host_t *ep;        /* current extent record */
4674         xfs_extnum_t    idx;            /* extent record index */
4675         xfs_ifork_t     *ifp;           /* inode fork pointer */
4676         xfs_bmbt_irec_t s;              /* file extent record */
4677
4678         ifp = XFS_IFORK_PTR(ip, whichfork);
4679         ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
4680         for (idx = 0; idx < cnt; idx++) {
4681                 ep = xfs_iext_get_ext(ifp, idx);
4682                 xfs_bmbt_get_all(ep, &s);
4683                 XFS_BMAP_TRACE_INSERT("exlist", ip, idx, 1, &s, NULL,
4684                         whichfork);
4685         }
4686 }
4687 #endif
4688
4689 #ifdef DEBUG
4690 /*
4691  * Validate that the bmbt_irecs being returned from bmapi are valid
4692  * given the caller's original parameters.  Specifically check the
4693  * ranges of the returned irecs to ensure that they only extend beyond
4694  * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
4695  */
4696 STATIC void
4697 xfs_bmap_validate_ret(
4698         xfs_fileoff_t           bno,
4699         xfs_filblks_t           len,
4700         int                     flags,
4701         xfs_bmbt_irec_t         *mval,
4702         int                     nmap,
4703         int                     ret_nmap)
4704 {
4705         int                     i;              /* index to map values */
4706
4707         ASSERT(ret_nmap <= nmap);
4708
4709         for (i = 0; i < ret_nmap; i++) {
4710                 ASSERT(mval[i].br_blockcount > 0);
4711                 if (!(flags & XFS_BMAPI_ENTIRE)) {
4712                         ASSERT(mval[i].br_startoff >= bno);
4713                         ASSERT(mval[i].br_blockcount <= len);
4714                         ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
4715                                bno + len);
4716                 } else {
4717                         ASSERT(mval[i].br_startoff < bno + len);
4718                         ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
4719                                bno);
4720                 }
4721                 ASSERT(i == 0 ||
4722                        mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
4723                        mval[i].br_startoff);
4724                 if ((flags & XFS_BMAPI_WRITE) && !(flags & XFS_BMAPI_DELAY))
4725                         ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
4726                                mval[i].br_startblock != HOLESTARTBLOCK);
4727                 ASSERT(mval[i].br_state == XFS_EXT_NORM ||
4728                        mval[i].br_state == XFS_EXT_UNWRITTEN);
4729         }
4730 }
4731 #endif /* DEBUG */
4732
4733
4734 /*
4735  * Map file blocks to filesystem blocks.
4736  * File range is given by the bno/len pair.
4737  * Adds blocks to the file if this is a write ("flags & XFS_BMAPI_WRITE" set)
4738  * into a hole or past eof.
4739  * Only allocates blocks from a single allocation group,
4740  * to avoid locking problems.
4741  * The returned value in "firstblock" from the first call in a transaction
4742  * must be remembered and presented to subsequent calls in "firstblock".
4743  * An upper bound for the number of blocks to be allocated is supplied to
4744  * the first call in "total"; if no allocation group has that many free
4745  * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
4746  */
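/*
 * Illustrative sketch of a typical allocating caller, not code from this
 * file: "offset_fsb", "count_fsb", "resblks" and "free_list" are
 * hypothetical locals, with free_list assumed to be initialized for the
 * transaction and firstfsb starting out as NULLFSBLOCK as described above:
 *
 *	xfs_fsblock_t	firstfsb = NULLFSBLOCK;
 *	xfs_bmbt_irec_t	imap[XFS_BMAP_MAX_NMAP];
 *	int		nimaps = XFS_BMAP_MAX_NMAP;
 *
 *	error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, XFS_BMAPI_WRITE,
 *			&firstfsb, resblks, imap, &nimaps, &free_list, NULL);
 *
 * On return, nimaps holds the number of mappings produced, and firstfsb
 * must be passed unchanged to any further xfs_bmapi() calls in the same
 * transaction, as described above.
 */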
4747 int                                     /* error */
4748 xfs_bmapi(
4749         xfs_trans_t     *tp,            /* transaction pointer */
4750         xfs_inode_t     *ip,            /* incore inode */
4751         xfs_fileoff_t   bno,            /* starting file offs. mapped */
4752         xfs_filblks_t   len,            /* length to map in file */
4753         int             flags,          /* XFS_BMAPI_... */
4754         xfs_fsblock_t   *firstblock,    /* first allocated block
4755                                            controls a.g. for allocs */
4756         xfs_extlen_t    total,          /* total blocks needed */
4757         xfs_bmbt_irec_t *mval,          /* output: map values */
4758         int             *nmap,          /* i/o: mval size/count */
4759         xfs_bmap_free_t *flist,         /* i/o: list extents to free */
4760         xfs_extdelta_t  *delta)         /* o: change made to incore extents */
4761 {
4762         xfs_fsblock_t   abno;           /* allocated block number */
4763         xfs_extlen_t    alen;           /* allocated extent length */
4764         xfs_fileoff_t   aoff;           /* allocated file offset */
4765         xfs_bmalloca_t  bma;            /* args for xfs_bmap_alloc */
4766         xfs_btree_cur_t *cur;           /* bmap btree cursor */
4767         xfs_fileoff_t   end;            /* end of mapped file region */
4768         int             eof;            /* we've hit the end of extents */
4769         xfs_bmbt_rec_host_t *ep;        /* extent record pointer */
4770         int             error;          /* error return */
4771         xfs_bmbt_irec_t got;            /* current file extent record */
4772         xfs_ifork_t     *ifp;           /* inode fork pointer */
4773         xfs_extlen_t    indlen;         /* indirect blocks length */
4774         xfs_extnum_t    lastx;          /* last useful extent number */
4775         int             logflags;       /* flags for transaction logging */
4776         xfs_extlen_t    minleft;        /* min blocks left after allocation */
4777         xfs_extlen_t    minlen;         /* min allocation size */
4778         xfs_mount_t     *mp;            /* xfs mount structure */
4779         int             n;              /* current extent index */
4780         int             nallocs;        /* number of extents alloc'd */
4781         xfs_extnum_t    nextents;       /* number of extents in file */
4782         xfs_fileoff_t   obno;           /* old block number (offset) */
4783         xfs_bmbt_irec_t prev;           /* previous file extent record */
4784         int             tmp_logflags;   /* temp flags holder */
4785         int             whichfork;      /* data or attr fork */
4786         char            inhole;         /* current location is hole in file */
4787         char            wasdelay;       /* old extent was delayed */
4788         char            wr;             /* this is a write request */
4789         char            rt;             /* this is a realtime file */
4790 #ifdef DEBUG
4791         xfs_fileoff_t   orig_bno;       /* original block number value */
4792         int             orig_flags;     /* original flags arg value */
4793         xfs_filblks_t   orig_len;       /* original value of len arg */
4794         xfs_bmbt_irec_t *orig_mval;     /* original value of mval */
4795         int             orig_nmap;      /* original value of *nmap */
4796
4797         orig_bno = bno;
4798         orig_len = len;
4799         orig_flags = flags;
4800         orig_mval = mval;
4801         orig_nmap = *nmap;
4802 #endif
4803         ASSERT(*nmap >= 1);
4804         ASSERT(*nmap <= XFS_BMAP_MAX_NMAP || !(flags & XFS_BMAPI_WRITE));
4805         whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
4806                 XFS_ATTR_FORK : XFS_DATA_FORK;
4807         mp = ip->i_mount;
4808         if (unlikely(XFS_TEST_ERROR(
4809             (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4810              XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
4811              XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL),
4812              mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
4813                 XFS_ERROR_REPORT("xfs_bmapi", XFS_ERRLEVEL_LOW, mp);
4814                 return XFS_ERROR(EFSCORRUPTED);
4815         }
4816         if (XFS_FORCED_SHUTDOWN(mp))
4817                 return XFS_ERROR(EIO);
4818         rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4819         ifp = XFS_IFORK_PTR(ip, whichfork);
4820         ASSERT(ifp->if_ext_max ==
4821                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
4822         if ((wr = (flags & XFS_BMAPI_WRITE)) != 0)
4823                 XFS_STATS_INC(xs_blk_mapw);
4824         else
4825                 XFS_STATS_INC(xs_blk_mapr);
4826         /*
4827          * IGSTATE flag is used to combine extents which
4828          * differ only due to the state of the extents.
4829          * This technique is used from xfs_getbmap()
4830          * when the caller does not wish to see the
4831          * separation (which is the default).
4832          *
4833          * This technique is also used when writing a
4834          * buffer which has been partially written
4835          * (usually by being flushed during a chunkread),
4836          * to ensure one write takes place. This also
4837          * prevents a change in the xfs inode extents at
4838          * this time, intentionally. This change occurs
4839          * on completion of the write operation, in
4840          * xfs_strat_comp(), where the xfs_bmapi() call
4841          * is transactioned, and the extents combined.
4842          */
4843         if ((flags & XFS_BMAPI_IGSTATE) && wr)  /* if writing unwritten space */
4844                 wr = 0;                         /* no allocations are allowed */
4845         ASSERT(wr || !(flags & XFS_BMAPI_DELAY));
4846         logflags = 0;
4847         nallocs = 0;
4848         cur = NULL;
4849         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4850                 ASSERT(wr && tp);
4851                 if ((error = xfs_bmap_local_to_extents(tp, ip,
4852                                 firstblock, total, &logflags, whichfork)))
4853                         goto error0;
4854         }
4855         if (wr && *firstblock == NULLFSBLOCK) {
4856                 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
4857                         minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
4858                 else
4859                         minleft = 1;
4860         } else
4861                 minleft = 0;
4862         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
4863             (error = xfs_iread_extents(tp, ip, whichfork)))
4864                 goto error0;
4865         ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
4866                 &prev);
4867         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4868         n = 0;
4869         end = bno + len;
4870         obno = bno;
4871         bma.ip = NULL;
4872         if (delta) {
4873                 delta->xed_startoff = NULLFILEOFF;
4874                 delta->xed_blockcount = 0;
4875         }
4876         while (bno < end && n < *nmap) {
4877                 /*
4878                  * Reading past eof, act as though there's a hole
4879                  * up to end.
4880                  */
4881                 if (eof && !wr)
4882                         got.br_startoff = end;
4883                 inhole = eof || got.br_startoff > bno;
4884                 wasdelay = wr && !inhole && !(flags & XFS_BMAPI_DELAY) &&
4885                         isnullstartblock(got.br_startblock);
4886                 /*
4887                  * First, deal with the hole before the allocated space
4888                  * that we found, if any.
4889                  */
4890                 if (wr && (inhole || wasdelay)) {
4891                         /*
4892                          * For the wasdelay case, we could also just
4893                          * allocate the stuff asked for in this bmap call
4894                          * but that wouldn't be as good.
4895                          */
4896                         if (wasdelay && !(flags & XFS_BMAPI_EXACT)) {
4897                                 alen = (xfs_extlen_t)got.br_blockcount;
4898                                 aoff = got.br_startoff;
4899                                 if (lastx != NULLEXTNUM && lastx) {
4900                                         ep = xfs_iext_get_ext(ifp, lastx - 1);
4901                                         xfs_bmbt_get_all(ep, &prev);
4902                                 }
4903                         } else if (wasdelay) {
4904                                 alen = (xfs_extlen_t)
4905                                         XFS_FILBLKS_MIN(len,
4906                                                 (got.br_startoff +
4907                                                  got.br_blockcount) - bno);
4908                                 aoff = bno;
4909                         } else {
4910                                 alen = (xfs_extlen_t)
4911                                         XFS_FILBLKS_MIN(len, MAXEXTLEN);
4912                                 if (!eof)
4913                                         alen = (xfs_extlen_t)
4914                                                 XFS_FILBLKS_MIN(alen,
4915                                                         got.br_startoff - bno);
4916                                 aoff = bno;
4917                         }
4918                         minlen = (flags & XFS_BMAPI_CONTIG) ? alen : 1;
4919                         if (flags & XFS_BMAPI_DELAY) {
4920                                 xfs_extlen_t    extsz;
4921
4922                                 /* Figure out the extent size, adjust alen */
4923                                 extsz = xfs_get_extsz_hint(ip);
4924                                 if (extsz) {
4925                                         error = xfs_bmap_extsize_align(mp,
4926                                                         &got, &prev, extsz,
4927                                                         rt, eof,
4928                                                         flags&XFS_BMAPI_DELAY,
4929                                                         flags&XFS_BMAPI_CONVERT,
4930                                                         &aoff, &alen);
4931                                         ASSERT(!error);
4932                                 }
4933
4934                                 if (rt)
4935                                         extsz = alen / mp->m_sb.sb_rextsize;
4936
4937                                 /*
4938                                  * Make a transaction-less quota reservation for
4939                                  * delayed allocation blocks. This number gets
4940                                  * adjusted later.  We return if we haven't
4941                                  * allocated blocks already inside this loop.
4942                                  */
4943                                 error = xfs_trans_reserve_quota_nblks(
4944                                                 NULL, ip, (long)alen, 0,
4945                                                 rt ? XFS_QMOPT_RES_RTBLKS :
4946                                                      XFS_QMOPT_RES_REGBLKS);
4947                                 if (error) {
4948                                         if (n == 0) {
4949                                                 *nmap = 0;
4950                                                 ASSERT(cur == NULL);
4951                                                 return error;
4952                                         }
4953                                         break;
4954                                 }
4955
4956                                 /*
4957                                  * Split changing sb for alen and indlen since
4958                                  * they could be coming from different places.
4959                                  */
4960                                 indlen = (xfs_extlen_t)
4961                                         xfs_bmap_worst_indlen(ip, alen);
4962                                 ASSERT(indlen > 0);
4963
4964                                 if (rt) {
4965                                         error = xfs_mod_incore_sb(mp,
4966                                                         XFS_SBS_FREXTENTS,
4967                                                         -((int64_t)extsz), (flags &
4968                                                         XFS_BMAPI_RSVBLOCKS));
4969                                 } else {
4970                                         error = xfs_mod_incore_sb(mp,
4971                                                         XFS_SBS_FDBLOCKS,
4972                                                         -((int64_t)alen), (flags &
4973                                                         XFS_BMAPI_RSVBLOCKS));
4974                                 }
4975                                 if (!error) {
4976                                         error = xfs_mod_incore_sb(mp,
4977                                                         XFS_SBS_FDBLOCKS,
4978                                                         -((int64_t)indlen), (flags &
4979                                                         XFS_BMAPI_RSVBLOCKS));
4980                                         if (error && rt)
4981                                                 xfs_mod_incore_sb(mp,
4982                                                         XFS_SBS_FREXTENTS,
4983                                                         (int64_t)extsz, (flags &
4984                                                         XFS_BMAPI_RSVBLOCKS));
4985                                         else if (error)
4986                                                 xfs_mod_incore_sb(mp,
4987                                                         XFS_SBS_FDBLOCKS,
4988                                                         (int64_t)alen, (flags &
4989                                                         XFS_BMAPI_RSVBLOCKS));
4990                                 }
4991
4992                                 if (error) {
4993                                         if (XFS_IS_QUOTA_ON(mp))
4994                                                 /* unreserve the blocks now */
4995                                                 (void)
4996                                                 xfs_trans_unreserve_quota_nblks(
4997                                                         NULL, ip,
4998                                                         (long)alen, 0, rt ?
4999                                                         XFS_QMOPT_RES_RTBLKS :
5000                                                         XFS_QMOPT_RES_REGBLKS);
5001                                         break;
5002                                 }
5003
5004                                 ip->i_delayed_blks += alen;
5005                                 abno = nullstartblock(indlen);
5006                         } else {
5007                                 /*
5008                                  * If first time, allocate and fill in
5009                                  * once-only bma fields.
5010                                  */
5011                                 if (bma.ip == NULL) {
5012                                         bma.tp = tp;
5013                                         bma.ip = ip;
5014                                         bma.prevp = &prev;
5015                                         bma.gotp = &got;
5016                                         bma.total = total;
5017                                         bma.userdata = 0;
5018                                 }
5019                                 /* Indicate if this is the first user data
5020                                  * in the file, or just any user data.
5021                                  */
5022                                 if (!(flags & XFS_BMAPI_METADATA)) {
5023                                         bma.userdata = (aoff == 0) ?
5024                                                 XFS_ALLOC_INITIAL_USER_DATA :
5025                                                 XFS_ALLOC_USERDATA;
5026                                 }
5027                                 /*
5028                                  * Fill in changeable bma fields.
5029                                  */
5030                                 bma.eof = eof;
5031                                 bma.firstblock = *firstblock;
5032                                 bma.alen = alen;
5033                                 bma.off = aoff;
5034                                 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
5035                                 bma.wasdel = wasdelay;
5036                                 bma.minlen = minlen;
5037                                 bma.low = flist->xbf_low;
5038                                 bma.minleft = minleft;
5039                                 /*
5040                                  * Only want to do the alignment at the
5041                                  * eof if it is userdata and allocation length
5042                                  * is larger than a stripe unit.
5043                                  */
5044                                 if (mp->m_dalign && alen >= mp->m_dalign &&
5045                                     (!(flags & XFS_BMAPI_METADATA)) &&
5046                                     (whichfork == XFS_DATA_FORK)) {
5047                                         if ((error = xfs_bmap_isaeof(ip, aoff,
5048                                                         whichfork, &bma.aeof)))
5049                                                 goto error0;
5050                                 } else
5051                                         bma.aeof = 0;
5052                                 /*
5053                                  * Call allocator.
5054                                  */
5055                                 if ((error = xfs_bmap_alloc(&bma)))
5056                                         goto error0;
5057                                 /*
5058                                  * Copy out result fields.
5059                                  */
5060                                 abno = bma.rval;
5061                                 if ((flist->xbf_low = bma.low))
5062                                         minleft = 0;
5063                                 alen = bma.alen;
5064                                 aoff = bma.off;
5065                                 ASSERT(*firstblock == NULLFSBLOCK ||
5066                                        XFS_FSB_TO_AGNO(mp, *firstblock) ==
5067                                        XFS_FSB_TO_AGNO(mp, bma.firstblock) ||
5068                                        (flist->xbf_low &&
5069                                         XFS_FSB_TO_AGNO(mp, *firstblock) <
5070                                         XFS_FSB_TO_AGNO(mp, bma.firstblock)));
5071                                 *firstblock = bma.firstblock;
5072                                 if (cur)
5073                                         cur->bc_private.b.firstblock =
5074                                                 *firstblock;
5075                                 if (abno == NULLFSBLOCK)
5076                                         break;
5077                                 if ((ifp->if_flags & XFS_IFBROOT) && !cur) {
5078                                         cur = xfs_bmbt_init_cursor(mp, tp,
5079                                                 ip, whichfork);
5080                                         cur->bc_private.b.firstblock =
5081                                                 *firstblock;
5082                                         cur->bc_private.b.flist = flist;
5083                                 }
5084                                 /*
5085                                  * Bump the number of extents we've allocated
5086                                  * in this call.
5087                                  */
5088                                 nallocs++;
5089                         }
5090                         if (cur)
5091                                 cur->bc_private.b.flags =
5092                                         wasdelay ? XFS_BTCUR_BPRV_WASDEL : 0;
5093                         got.br_startoff = aoff;
5094                         got.br_startblock = abno;
5095                         got.br_blockcount = alen;
5096                         got.br_state = XFS_EXT_NORM;    /* assume normal */
5097                         /*
5098                          * Determine the extent state based on the filesystem's
5099                          * unwritten-extent support.  A wasdelay extent has been
5100                          * initialized, so it shouldn't be flagged as unwritten.
5101                          */
5102                         if (wr && xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5103                                 if (!wasdelay && (flags & XFS_BMAPI_PREALLOC))
5104                                         got.br_state = XFS_EXT_UNWRITTEN;
5105                         }
5106                         error = xfs_bmap_add_extent(ip, lastx, &cur, &got,
5107                                 firstblock, flist, &tmp_logflags, delta,
5108                                 whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
5109                         logflags |= tmp_logflags;
5110                         if (error)
5111                                 goto error0;
5112                         lastx = ifp->if_lastex;
5113                         ep = xfs_iext_get_ext(ifp, lastx);
5114                         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5115                         xfs_bmbt_get_all(ep, &got);
5116                         ASSERT(got.br_startoff <= aoff);
5117                         ASSERT(got.br_startoff + got.br_blockcount >=
5118                                 aoff + alen);
5119 #ifdef DEBUG
5120                         if (flags & XFS_BMAPI_DELAY) {
5121                                 ASSERT(isnullstartblock(got.br_startblock));
5122                                 ASSERT(startblockval(got.br_startblock) > 0);
5123                         }
5124                         ASSERT(got.br_state == XFS_EXT_NORM ||
5125                                got.br_state == XFS_EXT_UNWRITTEN);
5126 #endif
5127                         /*
5128                          * Fall down into the found allocated space case.
5129                          */
5130                 } else if (inhole) {
5131                         /*
5132                          * Reading in a hole.
5133                          */
5134                         mval->br_startoff = bno;
5135                         mval->br_startblock = HOLESTARTBLOCK;
5136                         mval->br_blockcount =
5137                                 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
5138                         mval->br_state = XFS_EXT_NORM;
5139                         bno += mval->br_blockcount;
5140                         len -= mval->br_blockcount;
5141                         mval++;
5142                         n++;
5143                         continue;
5144                 }
5145                 /*
5146                  * Then deal with the allocated space we found.
5147                  */
5148                 ASSERT(ep != NULL);
5149                 if (!(flags & XFS_BMAPI_ENTIRE) &&
5150                     (got.br_startoff + got.br_blockcount > obno)) {
5151                         if (obno > bno)
5152                                 bno = obno;
5153                         ASSERT((bno >= obno) || (n == 0));
5154                         ASSERT(bno < end);
5155                         mval->br_startoff = bno;
5156                         if (isnullstartblock(got.br_startblock)) {
5157                                 ASSERT(!wr || (flags & XFS_BMAPI_DELAY));
5158                                 mval->br_startblock = DELAYSTARTBLOCK;
5159                         } else
5160                                 mval->br_startblock =
5161                                         got.br_startblock +
5162                                         (bno - got.br_startoff);
5163                         /*
5164                          * For the length, return the minimum of what we got
5165                          * and what we asked for.  We can use the len variable
5166                          * here because it is modified below, and we may have
5167                          * been through this code on an earlier pass if the
5168                          * first part of the allocation didn't overlap what
5169                          * was asked for.
5170                          */
5171                         mval->br_blockcount =
5172                                 XFS_FILBLKS_MIN(end - bno, got.br_blockcount -
5173                                         (bno - got.br_startoff));
5174                         mval->br_state = got.br_state;
5175                         ASSERT(mval->br_blockcount <= len);
5176                 } else {
5177                         *mval = got;
5178                         if (isnullstartblock(mval->br_startblock)) {
5179                                 ASSERT(!wr || (flags & XFS_BMAPI_DELAY));
5180                                 mval->br_startblock = DELAYSTARTBLOCK;
5181                         }
5182                 }
5183
5184                 /*
5185                  * Check if writing previously allocated but
5186                  * unwritten extents.
5187                  */
5188                 if (wr && mval->br_state == XFS_EXT_UNWRITTEN &&
5189                     ((flags & (XFS_BMAPI_PREALLOC|XFS_BMAPI_DELAY)) == 0)) {
5190                         /*
5191                          * We're writing it, so convert the extent to written state.
5192                          */
5193                         ASSERT(mval->br_blockcount <= len);
5194                         if ((ifp->if_flags & XFS_IFBROOT) && !cur) {
5195                                 cur = xfs_bmbt_init_cursor(mp,
5196                                         tp, ip, whichfork);
5197                                 cur->bc_private.b.firstblock =
5198                                         *firstblock;
5199                                 cur->bc_private.b.flist = flist;
5200                         }
5201                         mval->br_state = XFS_EXT_NORM;
5202                         error = xfs_bmap_add_extent(ip, lastx, &cur, mval,
5203                                 firstblock, flist, &tmp_logflags, delta,
5204                                 whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
5205                         logflags |= tmp_logflags;
5206                         if (error)
5207                                 goto error0;
5208                         lastx = ifp->if_lastex;
5209                         ep = xfs_iext_get_ext(ifp, lastx);
5210                         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5211                         xfs_bmbt_get_all(ep, &got);
5212                         /*
5213                          * We may have combined previously unwritten
5214                          * space with written space, so generate
5215                          * another request.
5216                          */
5217                         if (mval->br_blockcount < len)
5218                                 continue;
5219                 }
5220
5221                 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
5222                        ((mval->br_startoff + mval->br_blockcount) <= end));
5223                 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
5224                        (mval->br_blockcount <= len) ||
5225                        (mval->br_startoff < obno));
5226                 bno = mval->br_startoff + mval->br_blockcount;
5227                 len = end - bno;
5228                 if (n > 0 && mval->br_startoff == mval[-1].br_startoff) {
5229                         ASSERT(mval->br_startblock == mval[-1].br_startblock);
5230                         ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
5231                         ASSERT(mval->br_state == mval[-1].br_state);
5232                         mval[-1].br_blockcount = mval->br_blockcount;
5233                         mval[-1].br_state = mval->br_state;
5234                 } else if (n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
5235                            mval[-1].br_startblock != DELAYSTARTBLOCK &&
5236                            mval[-1].br_startblock != HOLESTARTBLOCK &&
5237                            mval->br_startblock ==
5238                            mval[-1].br_startblock + mval[-1].br_blockcount &&
5239                            ((flags & XFS_BMAPI_IGSTATE) ||
5240                                 mval[-1].br_state == mval->br_state)) {
5241                         ASSERT(mval->br_startoff ==
5242                                mval[-1].br_startoff + mval[-1].br_blockcount);
5243                         mval[-1].br_blockcount += mval->br_blockcount;
5244                 } else if (n > 0 &&
5245                            mval->br_startblock == DELAYSTARTBLOCK &&
5246                            mval[-1].br_startblock == DELAYSTARTBLOCK &&
5247                            mval->br_startoff ==
5248                            mval[-1].br_startoff + mval[-1].br_blockcount) {
5249                         mval[-1].br_blockcount += mval->br_blockcount;
5250                         mval[-1].br_state = mval->br_state;
5251                 } else if (!((n == 0) &&
5252                              ((mval->br_startoff + mval->br_blockcount) <=
5253                               obno))) {
5254                         mval++;
5255                         n++;
5256                 }
5257                 /*
5258                  * If we're done, stop now.  Stop when we've allocated
5259                  * XFS_BMAP_MAX_NMAP extents no matter what.  Otherwise
5260                  * the transaction may get too big.
5261                  */
5262                 if (bno >= end || n >= *nmap || nallocs >= *nmap)
5263                         break;
5264                 /*
5265                  * Else go on to the next record.
5266                  */
5267                 ep = xfs_iext_get_ext(ifp, ++lastx);
5268                 prev = got;
5269                 if (lastx >= nextents)
5270                         eof = 1;
5271                 else
5272                         xfs_bmbt_get_all(ep, &got);
5273         }
5274         ifp->if_lastex = lastx;
5275         *nmap = n;
5276         /*
5277          * Transform from btree to extents, give it cur.
5278          */
5279         if (tp && XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
5280             XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) {
5281                 ASSERT(wr && cur);
5282                 error = xfs_bmap_btree_to_extents(tp, ip, cur,
5283                         &tmp_logflags, whichfork);
5284                 logflags |= tmp_logflags;
5285                 if (error)
5286                         goto error0;
5287         }
5288         ASSERT(ifp->if_ext_max ==
5289                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5290         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
5291                XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max);
5292         error = 0;
5293         if (delta && delta->xed_startoff != NULLFILEOFF) {
5294                 /*
5295                  * A change was actually made.  Note that delta->xed_blockcount
5296                  * is an offset at this point and needs to be converted to a block count.
5297                  */
5298                 ASSERT(delta->xed_blockcount > delta->xed_startoff);
5299                 delta->xed_blockcount -= delta->xed_startoff;
5300         }
5301 error0:
5302         /*
5303          * Log everything.  Do this after conversion, there's no point in
5304          * logging the extent records if we've converted to btree format.
5305          */
5306         if ((logflags & xfs_ilog_fext(whichfork)) &&
5307             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5308                 logflags &= ~xfs_ilog_fext(whichfork);
5309         else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5310                  XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5311                 logflags &= ~xfs_ilog_fbroot(whichfork);
5312         /*
5313          * Log whatever the flags say, even if error.  Otherwise we might miss
5314          * detecting a case where the data is changed, there's an error,
5315          * and it's not logged so we don't shutdown when we should.
5316          */
5317         if (logflags) {
5318                 ASSERT(tp && wr);
5319                 xfs_trans_log_inode(tp, ip, logflags);
5320         }
5321         if (cur) {
5322                 if (!error) {
5323                         ASSERT(*firstblock == NULLFSBLOCK ||
5324                                XFS_FSB_TO_AGNO(mp, *firstblock) ==
5325                                XFS_FSB_TO_AGNO(mp,
5326                                        cur->bc_private.b.firstblock) ||
5327                                (flist->xbf_low &&
5328                                 XFS_FSB_TO_AGNO(mp, *firstblock) <
5329                                 XFS_FSB_TO_AGNO(mp,
5330                                         cur->bc_private.b.firstblock)));
5331                         *firstblock = cur->bc_private.b.firstblock;
5332                 }
5333                 xfs_btree_del_cursor(cur,
5334                         error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5335         }
5336         if (!error)
5337                 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
5338                         orig_nmap, *nmap);
5339         return error;
5340 }
5341
5342 /*
5343  * Map file blocks to filesystem blocks, simple version.
5344  * One block (extent) only, read-only.
5345  * The fork is selected directly by "whichfork" rather than by flags;
5346  * relative to xfs_bmapi(), the effect is as if XFS_BMAPI_METADATA
5347  * were set and all the other flags were clear.
5348  */
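/*
 * Minimal sketch of a caller (assumptions, not code from this file): look
 * up the filesystem block backing file block "offset_fsb" in the data
 * fork, where "offset_fsb" is a hypothetical local.  A returned fsb of
 * NULLFSBLOCK means the offset lies in a hole or beyond EOF:
 *
 *	xfs_fsblock_t	fsb;
 *
 *	error = xfs_bmapi_single(tp, ip, XFS_DATA_FORK, &fsb, offset_fsb);
 *	if (!error && fsb == NULLFSBLOCK)
 *		... the offset is a hole or past EOF ...
 */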
5349 int                                             /* error */
5350 xfs_bmapi_single(
5351         xfs_trans_t     *tp,            /* transaction pointer */
5352         xfs_inode_t     *ip,            /* incore inode */
5353         int             whichfork,      /* data or attr fork */
5354         xfs_fsblock_t   *fsb,           /* output: mapped block */
5355         xfs_fileoff_t   bno)            /* starting file offs. mapped */
5356 {
5357         int             eof;            /* we've hit the end of extents */
5358         int             error;          /* error return */
5359         xfs_bmbt_irec_t got;            /* current file extent record */
5360         xfs_ifork_t     *ifp;           /* inode fork pointer */
5361         xfs_extnum_t    lastx;          /* last useful extent number */
5362         xfs_bmbt_irec_t prev;           /* previous file extent record */
5363
5364         ifp = XFS_IFORK_PTR(ip, whichfork);
5365         if (unlikely(
5366             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
5367             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)) {
5368                XFS_ERROR_REPORT("xfs_bmapi_single", XFS_ERRLEVEL_LOW,
5369                                 ip->i_mount);
5370                return XFS_ERROR(EFSCORRUPTED);
5371         }
5372         if (XFS_FORCED_SHUTDOWN(ip->i_mount))
5373                 return XFS_ERROR(EIO);
5374         XFS_STATS_INC(xs_blk_mapr);
5375         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5376             (error = xfs_iread_extents(tp, ip, whichfork)))
5377                 return error;
5378         (void)xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
5379                 &prev);
5380         /*
5381          * Reading past eof or into a hole: there is no mapping,
5382          * so return NULLFSBLOCK.
5383          */
5384         if (eof || got.br_startoff > bno) {
5385                 *fsb = NULLFSBLOCK;
5386                 return 0;
5387         }
5388         ASSERT(!isnullstartblock(got.br_startblock));
5389         ASSERT(bno < got.br_startoff + got.br_blockcount);
5390         *fsb = got.br_startblock + (bno - got.br_startoff);
5391         ifp->if_lastex = lastx;
5392         return 0;
5393 }
5394
5395 /*
5396  * Unmap (remove) blocks from a file.
5397  * If nexts is nonzero then the number of extents to remove is limited to
5398  * that value.  *done is set once the entire requested range has been
5399  * unmapped (or there was nothing to unmap); otherwise it is left clear.
5400  */
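/*
 * Illustrative sketch of the usual calling pattern (assumptions, not code
 * from this file), e.g. from a truncate-like path: "first_unmap_block",
 * "unmap_len", "firstfsb" and "free_list" are hypothetical locals assumed
 * to be set up as for xfs_bmapi() above, and the extent-count cap of 2 is
 * just an example value.  The caller keeps calling, committing the freed
 * extents in between, until *done is set:
 *
 *	int	done = 0;
 *
 *	while (!done) {
 *		error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len,
 *				0, 2, &firstfsb, &free_list, NULL, &done);
 *		if (error)
 *			break;
 *		... xfs_bmap_finish() and a transaction roll would go here ...
 *	}
 */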
5401 int                                             /* error */
5402 xfs_bunmapi(
5403         xfs_trans_t             *tp,            /* transaction pointer */
5404         struct xfs_inode        *ip,            /* incore inode */
5405         xfs_fileoff_t           bno,            /* starting offset to unmap */
5406         xfs_filblks_t           len,            /* length to unmap in file */
5407         int                     flags,          /* misc flags */
5408         xfs_extnum_t            nexts,          /* number of extents max */
5409         xfs_fsblock_t           *firstblock,    /* first allocated block
5410                                                    controls a.g. for allocs */
5411         xfs_bmap_free_t         *flist,         /* i/o: list extents to free */
5412         xfs_extdelta_t          *delta,         /* o: change made to incore
5413                                                    extents */
5414         int                     *done)          /* set when unmapping is complete */
5415 {
5416         xfs_btree_cur_t         *cur;           /* bmap btree cursor */
5417         xfs_bmbt_irec_t         del;            /* extent being deleted */
5418         int                     eof;            /* is deleting at eof */
5419         xfs_bmbt_rec_host_t     *ep;            /* extent record pointer */
5420         int                     error;          /* error return value */
5421         xfs_extnum_t            extno;          /* extent number in list */
5422         xfs_bmbt_irec_t         got;            /* current extent record */
5423         xfs_ifork_t             *ifp;           /* inode fork pointer */
5424         int                     isrt;           /* freeing in rt area */
5425         xfs_extnum_t            lastx;          /* last extent index used */
5426         int                     logflags;       /* transaction logging flags */
5427         xfs_extlen_t            mod;            /* rt extent offset */
5428         xfs_mount_t             *mp;            /* mount structure */
5429         xfs_extnum_t            nextents;       /* number of file extents */
5430         xfs_bmbt_irec_t         prev;           /* previous extent record */
5431         xfs_fileoff_t           start;          /* first file offset deleted */
5432         int                     tmp_logflags;   /* partial logging flags */
5433         int                     wasdel;         /* was a delayed alloc extent */
5434         int                     whichfork;      /* data or attribute fork */
5435         int                     rsvd;           /* OK to allocate reserved blocks */
5436         xfs_fsblock_t           sum;
5437
5438         xfs_bunmap_trace(ip, bno, len, flags, (inst_t *)__return_address);
5439         whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
5440                 XFS_ATTR_FORK : XFS_DATA_FORK;
5441         ifp = XFS_IFORK_PTR(ip, whichfork);
5442         if (unlikely(
5443             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5444             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
5445                 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
5446                                  ip->i_mount);
5447                 return XFS_ERROR(EFSCORRUPTED);
5448         }
5449         mp = ip->i_mount;
5450         if (XFS_FORCED_SHUTDOWN(mp))
5451                 return XFS_ERROR(EIO);
5452         rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0;
5453         ASSERT(len > 0);
5454         ASSERT(nexts >= 0);
5455         ASSERT(ifp->if_ext_max ==
5456                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5457         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5458             (error = xfs_iread_extents(tp, ip, whichfork)))
5459                 return error;
5460         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5461         if (nextents == 0) {
5462                 *done = 1;
5463                 return 0;
5464         }
5465         XFS_STATS_INC(xs_blk_unmap);
5466         isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5467         start = bno;
5468         bno = start + len - 1;
5469         ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
5470                 &prev);
5471         if (delta) {
5472                 delta->xed_startoff = NULLFILEOFF;
5473                 delta->xed_blockcount = 0;
5474         }
5475         /*
5476          * Check to see if the given block number is past the end of the
5477          * file, back up to the last block if so...
5478          */
5479         if (eof) {
5480                 ep = xfs_iext_get_ext(ifp, --lastx);
5481                 xfs_bmbt_get_all(ep, &got);
5482                 bno = got.br_startoff + got.br_blockcount - 1;
5483         }
5484         logflags = 0;
5485         if (ifp->if_flags & XFS_IFBROOT) {
5486                 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
5487                 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5488                 cur->bc_private.b.firstblock = *firstblock;
5489                 cur->bc_private.b.flist = flist;
5490                 cur->bc_private.b.flags = 0;
5491         } else
5492                 cur = NULL;
5493         extno = 0;
5494         while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
5495                (nexts == 0 || extno < nexts)) {
5496                 /*
5497                  * Is the found extent after a hole in which bno lives?
5498                  * Just back up to the previous extent, if so.
5499                  */
5500                 if (got.br_startoff > bno) {
5501                         if (--lastx < 0)
5502                                 break;
5503                         ep = xfs_iext_get_ext(ifp, lastx);
5504                         xfs_bmbt_get_all(ep, &got);
5505                 }
5506                 /*
5507                  * Is the last block of this extent before the range
5508                  * we're supposed to delete?  If so, we're done.
5509                  */
5510                 bno = XFS_FILEOFF_MIN(bno,
5511                         got.br_startoff + got.br_blockcount - 1);
5512                 if (bno < start)
5513                         break;
5514                 /*
5515                  * Then deal with the (possibly delayed) allocated space
5516                  * we found.
5517                  */
5518                 ASSERT(ep != NULL);
5519                 del = got;
5520                 wasdel = isnullstartblock(del.br_startblock);
5521                 if (got.br_startoff < start) {
5522                         del.br_startoff = start;
5523                         del.br_blockcount -= start - got.br_startoff;
5524                         if (!wasdel)
5525                                 del.br_startblock += start - got.br_startoff;
5526                 }
5527                 if (del.br_startoff + del.br_blockcount > bno + 1)
5528                         del.br_blockcount = bno + 1 - del.br_startoff;
5529                 sum = del.br_startblock + del.br_blockcount;
5530                 if (isrt &&
5531                     (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
5532                         /*
5533                          * Realtime extent not lined up at the end.
5534                          * The extent could have been split into written
5535                          * and unwritten pieces, or we could just be
5536                          * unmapping part of it.  But we can't really
5537                          * get rid of part of a realtime extent.
5538                          */
5539                         if (del.br_state == XFS_EXT_UNWRITTEN ||
5540                             !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5541                                 /*
5542                                  * This piece is unwritten, or we're not
5543                                  * using unwritten extents.  Skip over it.
5544                                  */
5545                                 ASSERT(bno >= mod);
5546                                 bno -= mod > del.br_blockcount ?
5547                                         del.br_blockcount : mod;
5548                                 if (bno < got.br_startoff) {
5549                                         if (--lastx >= 0)
5550                                                 xfs_bmbt_get_all(xfs_iext_get_ext(
5551                                                         ifp, lastx), &got);
5552                                 }
5553                                 continue;
5554                         }
5555                         /*
5556                          * It's written, turn it unwritten.
5557                          * This is better than zeroing it.
5558                          */
5559                         ASSERT(del.br_state == XFS_EXT_NORM);
5560                         ASSERT(xfs_trans_get_block_res(tp) > 0);
5561                         /*
5562                          * If this spans a realtime extent boundary,
5563                          * chop it back to the start of the one we end at.
5564                          */
5565                         if (del.br_blockcount > mod) {
5566                                 del.br_startoff += del.br_blockcount - mod;
5567                                 del.br_startblock += del.br_blockcount - mod;
5568                                 del.br_blockcount = mod;
5569                         }
5570                         del.br_state = XFS_EXT_UNWRITTEN;
5571                         error = xfs_bmap_add_extent(ip, lastx, &cur, &del,
5572                                 firstblock, flist, &logflags, delta,
5573                                 XFS_DATA_FORK, 0);
5574                         if (error)
5575                                 goto error0;
5576                         goto nodelete;
5577                 }
5578                 if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
5579                         /*
5580                          * Realtime extent is lined up at the end but not
5581                          * at the front.  We'll get rid of full extents if
5582                          * we can.
5583                          */
5584                         mod = mp->m_sb.sb_rextsize - mod;
5585                         if (del.br_blockcount > mod) {
5586                                 del.br_blockcount -= mod;
5587                                 del.br_startoff += mod;
5588                                 del.br_startblock += mod;
5589                         } else if ((del.br_startoff == start &&
5590                                     (del.br_state == XFS_EXT_UNWRITTEN ||
5591                                      xfs_trans_get_block_res(tp) == 0)) ||
5592                                    !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5593                                 /*
5594                                  * Can't make it unwritten.  There isn't
5595                                  * a full extent here so just skip it.
5596                                  */
5597                                 ASSERT(bno >= del.br_blockcount);
5598                                 bno -= del.br_blockcount;
5599                                 if (bno < got.br_startoff) {
5600                                         if (--lastx >= 0)
5601                                                 xfs_bmbt_get_all(--ep, &got);
5602                                 }
5603                                 continue;
5604                         } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5605                                 /*
5606                                  * This one is already unwritten.
5607                                  * It must have a written left neighbor.
5608                                  * Unwrite the killed part of that one and
5609                                  * try again.
5610                                  */
5611                                 ASSERT(lastx > 0);
5612                                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp,
5613                                                 lastx - 1), &prev);
5614                                 ASSERT(prev.br_state == XFS_EXT_NORM);
5615                                 ASSERT(!isnullstartblock(prev.br_startblock));
5616                                 ASSERT(del.br_startblock ==
5617                                        prev.br_startblock + prev.br_blockcount);
5618                                 if (prev.br_startoff < start) {
5619                                         mod = start - prev.br_startoff;
5620                                         prev.br_blockcount -= mod;
5621                                         prev.br_startblock += mod;
5622                                         prev.br_startoff = start;
5623                                 }
5624                                 prev.br_state = XFS_EXT_UNWRITTEN;
5625                                 error = xfs_bmap_add_extent(ip, lastx - 1, &cur,
5626                                         &prev, firstblock, flist, &logflags,
5627                                         delta, XFS_DATA_FORK, 0);
5628                                 if (error)
5629                                         goto error0;
5630                                 goto nodelete;
5631                         } else {
5632                                 ASSERT(del.br_state == XFS_EXT_NORM);
5633                                 del.br_state = XFS_EXT_UNWRITTEN;
5634                                 error = xfs_bmap_add_extent(ip, lastx, &cur,
5635                                         &del, firstblock, flist, &logflags,
5636                                         delta, XFS_DATA_FORK, 0);
5637                                 if (error)
5638                                         goto error0;
5639                                 goto nodelete;
5640                         }
5641                 }
5642                 if (wasdel) {
5643                         ASSERT(startblockval(del.br_startblock) > 0);
5644                         /* Update realtime/data freespace, unreserve quota */
5645                         if (isrt) {
5646                                 xfs_filblks_t rtexts;
5647
5648                                 rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
5649                                 do_div(rtexts, mp->m_sb.sb_rextsize);
5650                                 xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
5651                                                 (int64_t)rtexts, rsvd);
5652                                 (void)xfs_trans_reserve_quota_nblks(NULL,
5653                                         ip, -((long)del.br_blockcount), 0,
5654                                         XFS_QMOPT_RES_RTBLKS);
5655                         } else {
5656                                 xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
5657                                                 (int64_t)del.br_blockcount, rsvd);
5658                                 (void)xfs_trans_reserve_quota_nblks(NULL,
5659                                         ip, -((long)del.br_blockcount), 0,
5660                                         XFS_QMOPT_RES_REGBLKS);
5661                         }
5662                         ip->i_delayed_blks -= del.br_blockcount;
5663                         if (cur)
5664                                 cur->bc_private.b.flags |=
5665                                         XFS_BTCUR_BPRV_WASDEL;
5666                 } else if (cur)
5667                         cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
5668                 /*
5669                  * If the directory code is running with no block
5670                  * reservation, the deleted range lies in the middle of
5671                  * its extent, and the resulting extent insert would
5672                  * force a conversion to btree format, then reject the
5673                  * delete.  The calling code will then swap blocks around
5674                  * instead.
5675                  * We have to check this now, rather than waiting for the
5676                  * btree conversion, because by then the transaction will
5677                  * already be dirty.
5678                  */
5679                 if (!wasdel && xfs_trans_get_block_res(tp) == 0 &&
5680                     XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5681                     XFS_IFORK_NEXTENTS(ip, whichfork) >= ifp->if_ext_max &&
5682                     del.br_startoff > got.br_startoff &&
5683                     del.br_startoff + del.br_blockcount <
5684                     got.br_startoff + got.br_blockcount) {
5685                         error = XFS_ERROR(ENOSPC);
5686                         goto error0;
5687                 }
5688                 error = xfs_bmap_del_extent(ip, tp, lastx, flist, cur, &del,
5689                                 &tmp_logflags, delta, whichfork, rsvd);
5690                 logflags |= tmp_logflags;
5691                 if (error)
5692                         goto error0;
5693                 bno = del.br_startoff - 1;
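                /*
                 * Reached via goto when (part of) the extent was converted
                 * to unwritten instead of being freed: move on to the next
                 * record without deleting anything.
                 */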
5694 nodelete:
5695                 lastx = ifp->if_lastex;
5696                 /*
5697                  * If not done, go on to the next (previous) record.
5698                  * Reset ep in case the extents array was reallocated.
5699                  */
5700                 ep = xfs_iext_get_ext(ifp, lastx);
5701                 if (bno != (xfs_fileoff_t)-1 && bno >= start) {
5702                         if (lastx >= XFS_IFORK_NEXTENTS(ip, whichfork) ||
5703                             xfs_bmbt_get_startoff(ep) > bno) {
5704                                 if (--lastx >= 0)
5705                                         ep = xfs_iext_get_ext(ifp, lastx);
5706                         }
5707                         if (lastx >= 0)
5708                                 xfs_bmbt_get_all(ep, &got);
5709                         extno++;
5710                 }
5711         }
5712         ifp->if_lastex = lastx;
5713         *done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0;
5714         ASSERT(ifp->if_ext_max ==
5715                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5716         /*
5717          * Convert to a btree if necessary.
5718          */
5719         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5720             XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max) {
5721                 ASSERT(cur == NULL);
5722                 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist,
5723                         &cur, 0, &tmp_logflags, whichfork);
5724                 logflags |= tmp_logflags;
5725                 if (error)
5726                         goto error0;
5727         }
5728         /*
5729          * Convert from btree to extents format, handing it the cursor.
5730          */
5731         else if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
5732                  XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) {
5733                 ASSERT(cur != NULL);
5734                 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
5735                         whichfork);
5736                 logflags |= tmp_logflags;
5737                 if (error)
5738                         goto error0;
5739         }
5740         /*
5741          * transform from extents to local?
5742          */
5743         ASSERT(ifp->if_ext_max ==
5744                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5745         error = 0;
5746         if (delta && delta->xed_startoff != NULLFILEOFF) {
5747                 /* A change was actually made.
5748                  * Note that delta->xed_blockount is an offset at this
5749                  * Note that delta->xed_blockcount is an offset at this
5750                  */
5751                 ASSERT(delta->xed_blockcount > delta->xed_startoff);
5752                 delta->xed_blockcount -= delta->xed_startoff;
5753         }
5754 error0:
5755         /*
5756          * Log everything.  Do this after the conversion; there's no point in
5757          * logging the extent records if we've converted to btree format.
5758          */
5759         if ((logflags & xfs_ilog_fext(whichfork)) &&
5760             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5761                 logflags &= ~xfs_ilog_fext(whichfork);
5762         else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5763                  XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5764                 logflags &= ~xfs_ilog_fbroot(whichfork);
5765         /*
5766          * Log the inode even in the error case; if the transaction
5767          * is dirty we'll need to shut down the filesystem.
5768          */
5769         if (logflags)
5770                 xfs_trans_log_inode(tp, ip, logflags);
5771         if (cur) {
5772                 if (!error) {
5773                         *firstblock = cur->bc_private.b.firstblock;
5774                         cur->bc_private.b.allocated = 0;
5775                 }
5776                 xfs_btree_del_cursor(cur,
5777                         error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5778         }
5779         return error;
5780 }
5781
5782 /*
5783  * Returns 1 for success, 0 if we failed to map the extent.
5784  */
5785 STATIC int
5786 xfs_getbmapx_fix_eof_hole(
5787         xfs_inode_t             *ip,            /* xfs incore inode pointer */
5788         struct getbmapx         *out,           /* output structure */
5789         int                     prealloced,     /* this is a file with
5790                                                  * preallocated data space */
5791         __int64_t               end,            /* last block requested */
5792         xfs_fsblock_t           startblock)
5793 {
5794         __int64_t               fixlen;
5795         xfs_mount_t             *mp;            /* file system mount point */
5796         xfs_ifork_t             *ifp;           /* inode fork pointer */
5797         xfs_extnum_t            lastx;          /* last extent pointer */
5798         xfs_fileoff_t           fileblock;
5799
5800         if (startblock == HOLESTARTBLOCK) {
5801                 mp = ip->i_mount;
5802                 out->bmv_block = -1;
5803                 fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, ip->i_size));
5804                 fixlen -= out->bmv_offset;
5805                 if (prealloced && out->bmv_offset + out->bmv_length == end) {
5806                         /* Came to hole at EOF. Trim it. */
5807                         if (fixlen <= 0)
5808                                 return 0;
5809                         out->bmv_length = fixlen;
5810                 }
5811         } else {
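                /*
                 * Delalloc or real extent: report the block number and,
                 * if this is the last extent in the data fork, flag it.
                 */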
5812                 if (startblock == DELAYSTARTBLOCK)
5813                         out->bmv_block = -2;
5814                 else
5815                         out->bmv_block = xfs_fsb_to_db(ip, startblock);
5816                 fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
5817                 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
5818                 if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
5819                    (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
5820                         out->bmv_oflags |= BMV_OF_LAST;
5821         }
5822
5823         return 1;
5824 }
5825
5826 /*
5827  * Get inode's extents as described in bmv, and format for output.
5828  * Calls formatter to fill the user's buffer until all extents
5829  * are mapped, until the passed-in bmv->bmv_count slots have
5830  * been filled, or until the formatter short-circuits the loop,
5831  * if it is tracking filled-in extents on its own.
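 *
 * The formatter is invoked as formatter(&arg, &out[i], &full) for each
 * mapped extent; setting *full ends the copy-out loop early.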
5832  */
5833 int                                             /* error code */
5834 xfs_getbmap(
5835         xfs_inode_t             *ip,
5836         struct getbmapx         *bmv,           /* user bmap structure */
5837         xfs_bmap_format_t       formatter,      /* format to user */
5838         void                    *arg)           /* formatter arg */
5839 {
5840         __int64_t               bmvend;         /* last block requested */
5841         int                     error = 0;      /* return value */
5842         __int64_t               fixlen;         /* length for -1 case */
5843         int                     i;              /* extent number */
5844         int                     lock;           /* lock state */
5845         xfs_bmbt_irec_t         *map;           /* buffer for user's data */
5846         xfs_mount_t             *mp;            /* file system mount point */
5847         int                     nex;            /* # of user extents can do */
5848         int                     nexleft;        /* # of user extents left */
5849         int                     subnex;         /* # of bmapi's can do */
5850         int                     nmap;           /* number of map entries */
5851         struct getbmapx         *out;           /* output structure */
5852         int                     whichfork;      /* data or attr fork */
5853         int                     prealloced;     /* this is a file with
5854                                                  * preallocated data space */
5855         int                     iflags;         /* interface flags */
5856         int                     bmapi_flags;    /* flags for xfs_bmapi */
5857         int                     cur_ext = 0;
5858
5859         mp = ip->i_mount;
5860         iflags = bmv->bmv_iflags;
5861         whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
5862
5863         if (whichfork == XFS_ATTR_FORK) {
5864                 if (XFS_IFORK_Q(ip)) {
5865                         if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
5866                             ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
5867                             ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
5868                                 return XFS_ERROR(EINVAL);
5869                 } else if (unlikely(
5870                            ip->i_d.di_aformat != 0 &&
5871                            ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
5872                         XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
5873                                          ip->i_mount);
5874                         return XFS_ERROR(EFSCORRUPTED);
5875                 }
5876
5877                 prealloced = 0;
5878                 fixlen = 1LL << 32;
5879         } else {
5880                 /*
5881                  * If the BMV_IF_NO_DMAPI_READ interface bit is specified, do
5882                  * not generate a DMAPI read event.  Otherwise, if the
5883                  * DM_EVENT_READ bit is set for the file, generate a read
5884                  * event in order that the DMAPI application may do its thing
5885                  * before we return the extents.  Usually this means restoring
5886                  * user file data to regions of the file that look like holes.
5887                  *
5888                  * The "old behavior" (from XFS_IOC_GETBMAP) is to not specify
5889                  * BMV_IF_NO_DMAPI_READ so that read events are generated.
5890                  * If this were not true, callers of ioctl(XFS_IOC_GETBMAP)
5891                  * could misinterpret holes in a DMAPI file as true holes,
5892                  * when in fact they may represent offline user data.
5893                  */
5894                 if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) &&
5895                     !(iflags & BMV_IF_NO_DMAPI_READ)) {
5896                         error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip,
5897                                               0, 0, 0, NULL);
5898                         if (error)
5899                                 return XFS_ERROR(error);
5900                 }
5901
5902                 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
5903                     ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
5904                     ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
5905                         return XFS_ERROR(EINVAL);
5906
5907                 if (xfs_get_extsz_hint(ip) ||
5908                     ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
5909                         prealloced = 1;
5910                         fixlen = XFS_MAXIOFFSET(mp);
5911                 } else {
5912                         prealloced = 0;
5913                         fixlen = ip->i_size;
5914                 }
5915         }
5916
5917         if (bmv->bmv_length == -1) {
5918                 fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
5919                 bmv->bmv_length =
5920                         max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
5921         } else if (bmv->bmv_length == 0) {
5922                 bmv->bmv_entries = 0;
5923                 return 0;
5924         } else if (bmv->bmv_length < 0) {
5925                 return XFS_ERROR(EINVAL);
5926         }
5927
5928         nex = bmv->bmv_count - 1;
5929         if (nex <= 0)
5930                 return XFS_ERROR(EINVAL);
5931         bmvend = bmv->bmv_offset + bmv->bmv_length;
5932
5933
5934         if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
5935                 return XFS_ERROR(ENOMEM);
5936         out = kmem_zalloc(bmv->bmv_count * sizeof(struct getbmapx), KM_MAYFAIL);
5937         if (!out)
5938                 return XFS_ERROR(ENOMEM);
5939
5940         xfs_ilock(ip, XFS_IOLOCK_SHARED);
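        /*
         * For the data fork, unless the caller asked for delalloc extents,
         * flush delayed allocations and dirty pages first so the extents
         * we report reflect the on-disk state.
         */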
5941         if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) {
5942                 if (ip->i_delayed_blks || ip->i_size > ip->i_d.di_size) {
5943                         error = xfs_flush_pages(ip, 0, -1, 0, FI_REMAPF);
5944                         if (error)
5945                                 goto out_unlock_iolock;
5946                 }
5947
5948                 ASSERT(ip->i_delayed_blks == 0);
5949         }
5950
5951         lock = xfs_ilock_map_shared(ip);
5952
5953         /*
5954          * Don't let nex be bigger than the number of extents
5955          * we can have assuming alternating holes and real extents.
5956          */
5957         if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
5958                 nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
5959
5960         bmapi_flags = xfs_bmapi_aflag(whichfork);
5961         if (!(iflags & BMV_IF_PREALLOC))
5962                 bmapi_flags |= XFS_BMAPI_IGSTATE;
5963
5964         /*
5965          * Allocate enough space to handle "subnex" maps at a time.
5966          */
5967         error = ENOMEM;
5968         subnex = 16;
5969         map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
5970         if (!map)
5971                 goto out_unlock_ilock;
5972
5973         bmv->bmv_entries = 0;
5974
5975         if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
5976             (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
5977                 error = 0;
5978                 goto out_free_map;
5979         }
5980
5981         nexleft = nex;
5982
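        /*
         * Query the extent map in chunks of at most "subnex" mappings per
         * xfs_bmapi() call, converting each mapping returned into a
         * getbmapx record for the caller.
         */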
5983         do {
5984                 nmap = (nexleft > subnex) ? subnex : nexleft;
5985                 error = xfs_bmapi(NULL, ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
5986                                   XFS_BB_TO_FSB(mp, bmv->bmv_length),
5987                                   bmapi_flags, NULL, 0, map, &nmap,
5988                                   NULL, NULL);
5989                 if (error)
5990                         goto out_free_map;
5991                 ASSERT(nmap <= subnex);
5992
5993                 for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
5994                         out[cur_ext].bmv_oflags = 0;
5995                         if (map[i].br_state == XFS_EXT_UNWRITTEN)
5996                                 out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
5997                         else if (map[i].br_startblock == DELAYSTARTBLOCK)
5998                                 out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
5999                         out[cur_ext].bmv_offset =
6000                                 XFS_FSB_TO_BB(mp, map[i].br_startoff);
6001                         out[cur_ext].bmv_length =
6002                                 XFS_FSB_TO_BB(mp, map[i].br_blockcount);
6003                         out[cur_ext].bmv_unused1 = 0;
6004                         out[cur_ext].bmv_unused2 = 0;
6005                         ASSERT(((iflags & BMV_IF_DELALLOC) != 0) ||
6006                               (map[i].br_startblock != DELAYSTARTBLOCK));
6007                         if (map[i].br_startblock == HOLESTARTBLOCK &&
6008                             whichfork == XFS_ATTR_FORK) {
6009                                 /* came to the end of attribute fork */
6010                                 out[cur_ext].bmv_oflags |= BMV_OF_LAST;
6011                                 goto out_free_map;
6012                         }
6013
6014                         if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
6015                                         prealloced, bmvend,
6016                                         map[i].br_startblock))
6017                                 goto out_free_map;
6018
6019                         nexleft--;
6020                         bmv->bmv_offset =
6021                                 out[cur_ext].bmv_offset +
6022                                 out[cur_ext].bmv_length;
6023                         bmv->bmv_length =
6024                                 max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
6025                         bmv->bmv_entries++;
6026                         cur_ext++;
6027                 }
6028         } while (nmap && nexleft && bmv->bmv_length);
6029
6030  out_free_map:
6031         kmem_free(map);
6032  out_unlock_ilock:
6033         xfs_iunlock_map_shared(ip, lock);
6034  out_unlock_iolock:
6035         xfs_iunlock(ip, XFS_IOLOCK_SHARED);
6036
6037         for (i = 0; i < cur_ext; i++) {
6038                 int full = 0;   /* user array is full */
6039
6040                 /* format results & advance arg */
6041                 error = formatter(&arg, &out[i], &full);
6042                 if (error || full)
6043                         break;
6044         }
6045
6046         kmem_free(out);
6047         return error;
6048 }
6049
6050 /*
6051  * Check the last inode extent to determine whether this allocation will result
6052  * in blocks being allocated at the end of the file. When we allocate new data
6053  * blocks at the end of the file which do not start at the previous data block,
6054  * we will try to align the new blocks at stripe unit boundaries.
6055  */
6056 STATIC int                              /* error */
6057 xfs_bmap_isaeof(
6058         xfs_inode_t     *ip,            /* incore inode pointer */
6059         xfs_fileoff_t   off,            /* file offset in fsblocks */
6060         int             whichfork,      /* data or attribute fork */
6061         char            *aeof)          /* return value */
6062 {
6063         int             error;          /* error return value */
6064         xfs_ifork_t     *ifp;           /* inode fork pointer */
6065         xfs_bmbt_rec_host_t *lastrec;   /* extent record pointer */
6066         xfs_extnum_t    nextents;       /* number of file extents */
6067         xfs_bmbt_irec_t s;              /* expanded extent record */
6068
6069         ASSERT(whichfork == XFS_DATA_FORK);
6070         ifp = XFS_IFORK_PTR(ip, whichfork);
6071         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
6072             (error = xfs_iread_extents(NULL, ip, whichfork)))
6073                 return error;
6074         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
6075         if (nextents == 0) {
6076                 *aeof = 1;
6077                 return 0;
6078         }
6079         /*
6080          * Go to the last extent
6081          */
6082         lastrec = xfs_iext_get_ext(ifp, nextents - 1);
6083         xfs_bmbt_get_all(lastrec, &s);
6084         /*
6085          * Check we are allocating in the last extent (for delayed allocations)
6086          * or past the last extent for non-delayed allocations.
6087          */
6088         *aeof = (off >= s.br_startoff &&
6089                  off < s.br_startoff + s.br_blockcount &&
6090                  isnullstartblock(s.br_startblock)) ||
6091                 off >= s.br_startoff + s.br_blockcount;
6092         return 0;
6093 }
6094
6095 /*
6096  * Check if the endoff is outside the last extent.  If so, the caller will grow
6097  * the allocation to a stripe unit boundary.
6098  */
6099 int                                     /* error */
6100 xfs_bmap_eof(
6101         xfs_inode_t     *ip,            /* incore inode pointer */
6102         xfs_fileoff_t   endoff,         /* file offset in fsblocks */
6103         int             whichfork,      /* data or attribute fork */
6104         int             *eof)           /* result value */
6105 {
6106         xfs_fsblock_t   blockcount;     /* extent block count */
6107         int             error;          /* error return value */
6108         xfs_ifork_t     *ifp;           /* inode fork pointer */
6109         xfs_bmbt_rec_host_t *lastrec;   /* extent record pointer */
6110         xfs_extnum_t    nextents;       /* number of file extents */
6111         xfs_fileoff_t   startoff;       /* extent starting file offset */
6112
6113         ASSERT(whichfork == XFS_DATA_FORK);
6114         ifp = XFS_IFORK_PTR(ip, whichfork);
6115         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
6116             (error = xfs_iread_extents(NULL, ip, whichfork)))
6117                 return error;
6118         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
6119         if (nextents == 0) {
6120                 *eof = 1;
6121                 return 0;
6122         }
6123         /*
6124          * Go to the last extent
6125          */
6126         lastrec = xfs_iext_get_ext(ifp, nextents - 1);
6127         startoff = xfs_bmbt_get_startoff(lastrec);
6128         blockcount = xfs_bmbt_get_blockcount(lastrec);
6129         *eof = endoff >= startoff + blockcount;
6130         return 0;
6131 }
6132
6133 #ifdef DEBUG
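/*
 * Search the btree cursor's buffers, and failing that the buffers attached
 * to the cursor's transaction log items, for the buffer at disk address
 * bno.  Returns NULL if it is not found.
 */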
6134 STATIC
6135 xfs_buf_t *
6136 xfs_bmap_get_bp(
6137         xfs_btree_cur_t         *cur,
6138         xfs_fsblock_t           bno)
6139 {
6140         int i;
6141         xfs_buf_t *bp;
6142
6143         if (!cur)
6144                 return NULL;
6145
6146         bp = NULL;
6147         for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
6148                 bp = cur->bc_bufs[i];
6149                 if (!bp) break;
6150                 if (XFS_BUF_ADDR(bp) == bno)
6151                         break;  /* Found it */
6152         }
6153         if (i == XFS_BTREE_MAXLEVELS)
6154                 bp = NULL;
6155
6156         if (!bp) { /* Chase down all the log items to see if the bp is there */
6157                 xfs_log_item_chunk_t    *licp;
6158                 xfs_trans_t             *tp;
6159
6160                 tp = cur->bc_tp;
6161                 licp = &tp->t_items;
6162                 while (!bp && licp != NULL) {
6163                         if (xfs_lic_are_all_free(licp)) {
6164                                 licp = licp->lic_next;
6165                                 continue;
6166                         }
6167                         for (i = 0; i < licp->lic_unused; i++) {
6168                                 xfs_log_item_desc_t     *lidp;
6169                                 xfs_log_item_t          *lip;
6170                                 xfs_buf_log_item_t      *bip;
6171                                 xfs_buf_t               *lbp;
6172
6173                                 if (xfs_lic_isfree(licp, i)) {
6174                                         continue;
6175                                 }
6176
6177                                 lidp = xfs_lic_slot(licp, i);
6178                                 lip = lidp->lid_item;
6179                                 if (lip->li_type != XFS_LI_BUF)
6180                                         continue;
6181
6182                                 bip = (xfs_buf_log_item_t *)lip;
6183                                 lbp = bip->bli_buf;
6184
6185                                 if (XFS_BUF_ADDR(lbp) == bno) {
6186                                         bp = lbp;
6187                                         break; /* Found it */
6188                                 }
6189                         }
6190                         licp = licp->lic_next;
6191                 }
6192         }
6193         return bp;
6194 }
6195
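/*
 * Sanity check one bmap btree node block: keys must be in increasing
 * startoff order, and no two child pointers may reference the same block.
 */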
6196 STATIC void
6197 xfs_check_block(
6198         struct xfs_btree_block  *block,
6199         xfs_mount_t             *mp,
6200         int                     root,
6201         short                   sz)
6202 {
6203         int                     i, j, dmxr;
6204         __be64                  *pp, *thispa;   /* pointer to block address */
6205         xfs_bmbt_key_t          *prevp, *keyp;
6206
6207         ASSERT(be16_to_cpu(block->bb_level) > 0);
6208
6209         prevp = NULL;
6210         for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
6211                 dmxr = mp->m_bmap_dmxr[0];
6212                 keyp = XFS_BMBT_KEY_ADDR(mp, block, i);
6213
6214                 if (prevp) {
6215                         ASSERT(be64_to_cpu(prevp->br_startoff) <
6216                                be64_to_cpu(keyp->br_startoff));
6217                 }
6218                 prevp = keyp;
6219
6220                 /*
6221                  * Compare the block numbers to see if there are dups.
6222                  */
6223                 if (root)
6224                         pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
6225                 else
6226                         pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);
6227
6228                 for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
6229                         if (root)
6230                                 thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
6231                         else
6232                                 thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
6233                         if (*thispa == *pp) {
6234                                 cmn_err(CE_WARN, "%s: thispa(%d) == pp(%d) %Ld",
6235                                         __func__, j, i,
6236                                         (unsigned long long)be64_to_cpu(*thispa));
6237                                 panic("%s: ptrs are equal in node\n",
6238                                         __func__);
6239                         }
6240                 }
6241         }
6242 }
6243
6244 /*
6245  * Check that the extents for the inode ip are in the right order in all
6246  * btree leaves.
6247  */
6248
6249 STATIC void
6250 xfs_bmap_check_leaf_extents(
6251         xfs_btree_cur_t         *cur,   /* btree cursor or null */
6252         xfs_inode_t             *ip,            /* incore inode pointer */
6253         int                     whichfork)      /* data or attr fork */
6254 {
6255         struct xfs_btree_block  *block; /* current btree block */
6256         xfs_fsblock_t           bno;    /* block # of "block" */
6257         xfs_buf_t               *bp;    /* buffer for "block" */
6258         int                     error;  /* error return value */
6259         xfs_extnum_t            i=0, j; /* index into the extents list */
6260         xfs_ifork_t             *ifp;   /* fork structure */
6261         int                     level;  /* btree level, for checking */
6262         xfs_mount_t             *mp;    /* file system mount structure */
6263         __be64                  *pp;    /* pointer to block address */
6264         xfs_bmbt_rec_t          *ep;    /* pointer to current extent */
6265         xfs_bmbt_rec_t          last = {0, 0}; /* last extent in prev block */
6266         xfs_bmbt_rec_t          *nextp; /* pointer to next extent */
6267         int                     bp_release = 0;
6268
6269         if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
6270                 return;
6271         }
6272
6273         bno = NULLFSBLOCK;
6274         mp = ip->i_mount;
6275         ifp = XFS_IFORK_PTR(ip, whichfork);
6276         block = ifp->if_broot;
6277         /*
6278          * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
6279          */
6280         level = be16_to_cpu(block->bb_level);
6281         ASSERT(level > 0);
6282         xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
6283         pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
6284         bno = be64_to_cpu(*pp);
6285
6286         ASSERT(bno != NULLDFSBNO);
6287         ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
6288         ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
6289
6290         /*
6291          * Go down the tree until leaf level is reached, following the first
6292          * pointer (leftmost) at each level.
6293          */
6294         while (level-- > 0) {
6295                 /* See if buf is in cur first */
6296                 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
6297                 if (bp) {
6298                         bp_release = 0;
6299                 } else {
6300                         bp_release = 1;
6301                 }
6302                 if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
6303                                 XFS_BMAP_BTREE_REF)))
6304                         goto error_norelse;
6305                 block = XFS_BUF_TO_BLOCK(bp);
6306                 XFS_WANT_CORRUPTED_GOTO(
6307                         xfs_bmap_sanity_check(mp, bp, level),
6308                         error0);
6309                 if (level == 0)
6310                         break;
6311
6312                 /*
6313                  * Check this block for basic sanity (increasing keys and
6314                  * no duplicate blocks).
6315                  */
6316
6317                 xfs_check_block(block, mp, 0, 0);
6318                 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
6319                 bno = be64_to_cpu(*pp);
6320                 XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
6321                 if (bp_release) {
6322                         bp_release = 0;
6323                         xfs_trans_brelse(NULL, bp);
6324                 }
6325         }
6326
6327         /*
6328          * Here with bp and block set to the leftmost leaf node in the tree.
6329          */
6330         i = 0;
6331
6332         /*
6333          * Loop over all leaf nodes checking that all extents are in the right order.
6334          */
6335         for (;;) {
6336                 xfs_fsblock_t   nextbno;
6337                 xfs_extnum_t    num_recs;
6338
6339
6340                 num_recs = xfs_btree_get_numrecs(block);
6341
6342                 /*
6343                  * Read-ahead the next leaf block, if any.
6344                  */
6345
6346                 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
6347
6348                 /*
6349                  * Check all the extents to make sure they are OK.
6350                  * If we had a previous block, the last entry should
6351                  * conform with the first entry in this one.
6352                  */
6353
6354                 ep = XFS_BMBT_REC_ADDR(mp, block, 1);
6355                 if (i) {
6356                         ASSERT(xfs_bmbt_disk_get_startoff(&last) +
6357                                xfs_bmbt_disk_get_blockcount(&last) <=
6358                                xfs_bmbt_disk_get_startoff(ep));
6359                 }
6360                 for (j = 1; j < num_recs; j++) {
6361                         nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
6362                         ASSERT(xfs_bmbt_disk_get_startoff(ep) +
6363                                xfs_bmbt_disk_get_blockcount(ep) <=
6364                                xfs_bmbt_disk_get_startoff(nextp));
6365                         ep = nextp;
6366                 }
6367
6368                 last = *ep;
6369                 i += num_recs;
6370                 if (bp_release) {
6371                         bp_release = 0;
6372                         xfs_trans_brelse(NULL, bp);
6373                 }
6374                 bno = nextbno;
6375                 /*
6376                  * If we've reached the end, stop.
6377                  */
6378                 if (bno == NULLFSBLOCK)
6379                         break;
6380
6381                 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
6382                 if (bp) {
6383                         bp_release = 0;
6384                 } else {
6385                         bp_release = 1;
6386                 }
6387                 if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
6388                                 XFS_BMAP_BTREE_REF)))
6389                         goto error_norelse;
6390                 block = XFS_BUF_TO_BLOCK(bp);
6391         }
6392         if (bp_release) {
6393                 bp_release = 0;
6394                 xfs_trans_brelse(NULL, bp);
6395         }
6396         return;
6397
6398 error0:
6399         cmn_err(CE_WARN, "%s: at error0", __func__);
6400         if (bp_release)
6401                 xfs_trans_brelse(NULL, bp);
6402 error_norelse:
6403         cmn_err(CE_WARN, "%s: BAD after btree leaves for %d extents",
6404                 __func__, i);
6405         panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
6406         return;
6407 }
6408 #endif
6409
6410 /*
6411  * Count fsblocks of the given fork.
6412  */
6413 int                                             /* error */
6414 xfs_bmap_count_blocks(
6415         xfs_trans_t             *tp,            /* transaction pointer */
6416         xfs_inode_t             *ip,            /* incore inode */
6417         int                     whichfork,      /* data or attr fork */
6418         int                     *count)         /* out: count of blocks */
6419 {
6420         struct xfs_btree_block  *block; /* current btree block */
6421         xfs_fsblock_t           bno;    /* block # of "block" */
6422         xfs_ifork_t             *ifp;   /* fork structure */
6423         int                     level;  /* btree level, for checking */
6424         xfs_mount_t             *mp;    /* file system mount structure */
6425         __be64                  *pp;    /* pointer to block address */
6426
6427         bno = NULLFSBLOCK;
6428         mp = ip->i_mount;
6429         ifp = XFS_IFORK_PTR(ip, whichfork);
6430         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS) {
6431                 xfs_bmap_count_leaves(ifp, 0,
6432                         ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
6433                         count);
6434                 return 0;
6435         }
6436
6437         /*
6438          * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
6439          */
6440         block = ifp->if_broot;
6441         level = be16_to_cpu(block->bb_level);
6442         ASSERT(level > 0);
6443         pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
6444         bno = be64_to_cpu(*pp);
6445         ASSERT(bno != NULLDFSBNO);
6446         ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
6447         ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
6448
6449         if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
6450                 XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
6451                                  mp);
6452                 return XFS_ERROR(EFSCORRUPTED);
6453         }
6454
6455         return 0;
6456 }
6457
6458 /*
6459  * Recursively walks each level of a btree
6460  * to count total fsblocks in use.
6461  */
6462 STATIC int                                     /* error */
6463 xfs_bmap_count_tree(
6464         xfs_mount_t     *mp,            /* file system mount point */
6465         xfs_trans_t     *tp,            /* transaction pointer */
6466         xfs_ifork_t     *ifp,           /* inode fork pointer */
6467         xfs_fsblock_t   blockno,        /* file system block number */
6468         int             levelin,        /* level in btree */
6469         int             *count)         /* Count of blocks */
6470 {
6471         int                     error;
6472         xfs_buf_t               *bp, *nbp;
6473         int                     level = levelin;
6474         __be64                  *pp;
6475         xfs_fsblock_t           bno = blockno;
6476         xfs_fsblock_t           nextbno;
6477         struct xfs_btree_block  *block, *nextblock;
6478         int                     numrecs;
6479
6480         if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF)))
6481                 return error;
6482         *count += 1;
6483         block = XFS_BUF_TO_BLOCK(bp);
6484
6485         if (--level) {
6486                 /* Not at node above leaves, count this level of nodes */
6487                 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
6488                 while (nextbno != NULLFSBLOCK) {
6489                         if ((error = xfs_btree_read_bufl(mp, tp, nextbno,
6490                                 0, &nbp, XFS_BMAP_BTREE_REF)))
6491                                 return error;
6492                         *count += 1;
6493                         nextblock = XFS_BUF_TO_BLOCK(nbp);
6494                         nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
6495                         xfs_trans_brelse(tp, nbp);
6496                 }
6497
6498                 /* Dive to the next level */
6499                 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
6500                 bno = be64_to_cpu(*pp);
6501                 if (unlikely((error =
6502                      xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
6503                         xfs_trans_brelse(tp, bp);
6504                         XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
6505                                          XFS_ERRLEVEL_LOW, mp);
6506                         return XFS_ERROR(EFSCORRUPTED);
6507                 }
6508                 xfs_trans_brelse(tp, bp);
6509         } else {
6510                 /* count all level 1 nodes and their leaves */
6511                 for (;;) {
6512                         nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
6513                         numrecs = be16_to_cpu(block->bb_numrecs);
6514                         xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
6515                         xfs_trans_brelse(tp, bp);
6516                         if (nextbno == NULLFSBLOCK)
6517                                 break;
6518                         bno = nextbno;
6519                         if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
6520                                 XFS_BMAP_BTREE_REF)))
6521                                 return error;
6522                         *count += 1;
6523                         block = XFS_BUF_TO_BLOCK(bp);
6524                 }
6525         }
6526         return 0;
6527 }
6528
6529 /*
6530  * Count leaf blocks given a range of extent records.
6531  */
6532 STATIC void
6533 xfs_bmap_count_leaves(
6534         xfs_ifork_t             *ifp,
6535         xfs_extnum_t            idx,
6536         int                     numrecs,
6537         int                     *count)
6538 {
6539         int             b;
6540
6541         for (b = 0; b < numrecs; b++) {
6542                 xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
6543                 *count += xfs_bmbt_get_blockcount(frp);
6544         }
6545 }
6546
6547 /*
6548  * Count leaf blocks given a range of extent records originally
6549  * in btree format.
6550  */
6551 STATIC void
6552 xfs_bmap_disk_count_leaves(
6553         struct xfs_mount        *mp,
6554         struct xfs_btree_block  *block,
6555         int                     numrecs,
6556         int                     *count)
6557 {
6558         int             b;
6559         xfs_bmbt_rec_t  *frp;
6560
6561         for (b = 1; b <= numrecs; b++) {
6562                 frp = XFS_BMBT_REC_ADDR(mp, block, b);
6563                 *count += xfs_bmbt_disk_get_blockcount(frp);
6564         }
6565 }