2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include "xfs_types.h"
24 #include "xfs_trans.h"
28 #include "xfs_dmapi.h"
29 #include "xfs_mount.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_alloc_btree.h"
32 #include "xfs_ialloc_btree.h"
33 #include "xfs_dir2_sf.h"
34 #include "xfs_attr_sf.h"
35 #include "xfs_dinode.h"
36 #include "xfs_inode.h"
37 #include "xfs_inode_item.h"
38 #include "xfs_alloc.h"
39 #include "xfs_btree.h"
40 #include "xfs_btree_trace.h"
41 #include "xfs_ialloc.h"
42 #include "xfs_itable.h"
44 #include "xfs_error.h"
45 #include "xfs_quota.h"
48 * Prototypes for internal btree functions.
/*
 * Forward declarations for the file-local (STATIC) bmap-btree helpers
 * defined below.  They operate on a generic btree cursor and a buffer
 * holding a bmap btree block.
 */
52 STATIC int xfs_bmbt_killroot(xfs_btree_cur_t *);
53 STATIC void xfs_bmbt_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int);
54 STATIC void xfs_bmbt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
55 STATIC int xfs_bmbt_lshift(xfs_btree_cur_t *, int, int *);
56 STATIC int xfs_bmbt_split(xfs_btree_cur_t *, int, xfs_fsblock_t *,
57 __uint64_t *, xfs_btree_cur_t **, int *);
/*
 * Short aliases used by the trace calls in this file; they expand to the
 * generic XBT_* trace-state tokens.
 */
61 #define ENTRY XBT_ENTRY
62 #define ERROR XBT_ERROR
66 * Keep the XFS_BMBT_TRACE_ names around for now until all code using them
67 * is converted to be generic and thus switches to the XFS_BTREE_TRACE_ names.
/*
 * Compatibility shims: each XFS_BMBT_TRACE_* macro forwards to the generic
 * XFS_BTREE_TRACE_* equivalent, casting bmbt-specific pointer/key/record
 * types to the generic btree union types where needed.
 */
69 #define XFS_BMBT_TRACE_ARGBI(c,b,i) \
70 XFS_BTREE_TRACE_ARGBI(c,b,i)
71 #define XFS_BMBT_TRACE_ARGBII(c,b,i,j) \
72 XFS_BTREE_TRACE_ARGBII(c,b,i,j)
73 #define XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j) \
74 XFS_BTREE_TRACE_ARGFFFI(c,o,b,i,j)
75 #define XFS_BMBT_TRACE_ARGI(c,i) \
76 XFS_BTREE_TRACE_ARGI(c,i)
77 #define XFS_BMBT_TRACE_ARGIFK(c,i,f,s) \
78 XFS_BTREE_TRACE_ARGIPK(c,i,(union xfs_btree_ptr)f,s)
79 #define XFS_BMBT_TRACE_ARGIFR(c,i,f,r) \
80 XFS_BTREE_TRACE_ARGIPR(c,i, \
81 (union xfs_btree_ptr)f, (union xfs_btree_rec *)r)
82 #define XFS_BMBT_TRACE_ARGIK(c,i,k) \
83 XFS_BTREE_TRACE_ARGIK(c,i,(union xfs_btree_key *)k)
84 #define XFS_BMBT_TRACE_CURSOR(c,s) \
85 XFS_BTREE_TRACE_CURSOR(c,s)
93 * Delete record pointed to by cur/level.
/*
 * xfs_bmbt_delrec — remove the record at cur->bc_ptrs[level] from the bmap
 * btree block at @level.  If the block then falls below the minimum record
 * count, try to borrow a record from a sibling (via lshift/rshift) or merge
 * the block with a sibling, freeing the emptied block.  *stat reports
 * success/failure to the caller.
 *
 * NOTE(review): this dump is line-sampled; the function-name line, closing
 * braces, return statements and goto labels (e.g. error0) are missing from
 * view.  Comments below describe only what the visible lines establish.
 */
95 STATIC int /* error */
99 int *stat) /* success/failure */
101 xfs_bmbt_block_t *block; /* bmap btree block */
102 xfs_fsblock_t bno; /* fs-relative block number */
103 xfs_buf_t *bp; /* buffer for block */
104 int error; /* error return value */
105 int i; /* loop counter */
106 int j; /* temp state */
107 xfs_bmbt_key_t key; /* bmap btree key */
108 xfs_bmbt_key_t *kp=NULL; /* pointer to bmap btree key */
109 xfs_fsblock_t lbno; /* left sibling block number */
110 xfs_buf_t *lbp; /* left buffer pointer */
111 xfs_bmbt_block_t *left; /* left btree block */
112 xfs_bmbt_key_t *lkp; /* left btree key */
113 xfs_bmbt_ptr_t *lpp; /* left address pointer */
114 int lrecs=0; /* left record count */
115 xfs_bmbt_rec_t *lrp; /* left record pointer */
116 xfs_mount_t *mp; /* file system mount point */
117 xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */
118 int ptr; /* key/record index */
119 xfs_fsblock_t rbno; /* right sibling block number */
120 xfs_buf_t *rbp; /* right buffer pointer */
121 xfs_bmbt_block_t *right; /* right btree block */
122 xfs_bmbt_key_t *rkp; /* right btree key */
123 xfs_bmbt_rec_t *rp; /* pointer to bmap btree rec */
124 xfs_bmbt_ptr_t *rpp; /* right address pointer */
125 xfs_bmbt_block_t *rrblock; /* right-right btree block */
126 xfs_buf_t *rrbp; /* right-right buffer pointer */
127 int rrecs=0; /* right record count */
128 xfs_bmbt_rec_t *rrp; /* right record pointer */
129 xfs_btree_cur_t *tcur; /* temporary btree cursor */
130 int numrecs; /* temporary numrec count */
131 int numlrecs, numrrecs;
133 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
134 XFS_BMBT_TRACE_ARGI(cur, level);
/* Index (1-based) of the record to delete within this block. */
135 ptr = cur->bc_ptrs[level];
138 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/* Fetch the target block and validate it before modifying anything. */
142 block = xfs_bmbt_get_block(cur, level, &bp);
143 numrecs = be16_to_cpu(block->bb_numrecs);
145 if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
146 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
151 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
155 XFS_STATS_INC(xs_bmbt_delrec);
/*
 * Interior-node path (visible below): validate the child pointers past the
 * deletion point, then close the gap in the key and pointer arrays.
 */
157 kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
158 pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
160 for (i = ptr; i < numrecs; i++) {
161 if ((error = xfs_btree_check_lptr_disk(cur, pp[i], level))) {
162 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
168 memmove(&kp[ptr - 1], &kp[ptr],
169 (numrecs - ptr) * sizeof(*kp));
170 memmove(&pp[ptr - 1], &pp[ptr],
171 (numrecs - ptr) * sizeof(*pp));
172 xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs - 1);
173 xfs_bmbt_log_keys(cur, bp, ptr, numrecs - 1);
/* Leaf path: close the gap in the record array instead. */
176 rp = XFS_BMAP_REC_IADDR(block, 1, cur);
178 memmove(&rp[ptr - 1], &rp[ptr],
179 (numrecs - ptr) * sizeof(*rp));
180 xfs_bmbt_log_recs(cur, bp, ptr, numrecs - 1);
184 cpu_to_be64(xfs_bmbt_disk_get_startoff(rp));
/* Record the reduced count on disk. */
189 block->bb_numrecs = cpu_to_be16(numrecs);
190 xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS);
192 * We're at the root level.
193 * First, shrink the root block in-memory.
194 * Try to get rid of the next level down.
195 * If we can't then there's nothing left to do.
197 if (level == cur->bc_nlevels - 1) {
198 xfs_iroot_realloc(cur->bc_private.b.ip, -1,
199 cur->bc_private.b.whichfork);
200 if ((error = xfs_bmbt_killroot(cur))) {
201 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
204 if (level > 0 && (error = xfs_btree_decrement(cur, level, &j))) {
205 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
208 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/* Deleting the first entry changes the key seen by the parent level. */
212 if (ptr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)kp, level + 1))) {
213 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
/* Still at or above the minimum record count: no rebalancing needed. */
216 if (numrecs >= XFS_BMAP_BLOCK_IMINRECS(level, cur)) {
217 if (level > 0 && (error = xfs_btree_decrement(cur, level, &j))) {
218 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
221 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/* Below minimum: look at the siblings to borrow from or merge with. */
225 rbno = be64_to_cpu(block->bb_rightsib);
226 lbno = be64_to_cpu(block->bb_leftsib);
228 * One child of root, need to get a chance to copy its contents
229 * into the root and delete it. Can't go up to next level,
230 * there's nothing to delete there.
232 if (lbno == NULLFSBLOCK && rbno == NULLFSBLOCK &&
233 level == cur->bc_nlevels - 2) {
234 if ((error = xfs_bmbt_killroot(cur))) {
235 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
238 if (level > 0 && (error = xfs_btree_decrement(cur, level, &i))) {
239 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
242 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
246 ASSERT(rbno != NULLFSBLOCK || lbno != NULLFSBLOCK);
/* Duplicate the cursor so sibling probing doesn't disturb @cur. */
247 if ((error = xfs_btree_dup_cursor(cur, &tcur))) {
248 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
/*
 * Right sibling exists: walk tcur onto it and, if it can spare a record,
 * shift one left into our block.
 */
252 if (rbno != NULLFSBLOCK) {
253 i = xfs_btree_lastrec(tcur, level);
254 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
255 if ((error = xfs_btree_increment(tcur, level, &i))) {
256 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
259 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
260 i = xfs_btree_lastrec(tcur, level);
261 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
262 rbp = tcur->bc_bufs[level];
263 right = XFS_BUF_TO_BMBT_BLOCK(rbp);
265 if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) {
266 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
270 bno = be64_to_cpu(right->bb_leftsib);
271 if (be16_to_cpu(right->bb_numrecs) - 1 >=
272 XFS_BMAP_BLOCK_IMINRECS(level, cur)) {
273 if ((error = xfs_bmbt_lshift(tcur, level, &i))) {
274 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
278 ASSERT(be16_to_cpu(block->bb_numrecs) >=
279 XFS_BMAP_BLOCK_IMINRECS(level, tcur));
280 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
283 if ((error = xfs_btree_decrement(cur,
285 XFS_BMBT_TRACE_CURSOR(cur,
290 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
295 rrecs = be16_to_cpu(right->bb_numrecs);
/*
 * Left sibling exists: walk tcur onto it.  NOTE(review): two similar
 * "if (lbno != NULLFSBLOCK)" stanzas are visible below — presumably one
 * repositions tcur after the right-sibling probe and the other does the
 * main left-sibling walk, but missing lines make the split uncertain.
 */
296 if (lbno != NULLFSBLOCK) {
297 i = xfs_btree_firstrec(tcur, level);
298 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
299 if ((error = xfs_btree_decrement(tcur, level, &i))) {
300 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
303 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
306 if (lbno != NULLFSBLOCK) {
307 i = xfs_btree_firstrec(tcur, level);
308 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
310 * decrement to last in block
312 if ((error = xfs_btree_decrement(tcur, level, &i))) {
313 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
316 i = xfs_btree_firstrec(tcur, level);
317 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
318 lbp = tcur->bc_bufs[level];
319 left = XFS_BUF_TO_BMBT_BLOCK(lbp);
321 if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) {
322 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
326 bno = be64_to_cpu(left->bb_rightsib);
/* Left sibling can spare a record: shift one right into our block. */
327 if (be16_to_cpu(left->bb_numrecs) - 1 >=
328 XFS_BMAP_BLOCK_IMINRECS(level, cur)) {
329 if ((error = xfs_btree_rshift(tcur, level, &i))) {
330 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
334 ASSERT(be16_to_cpu(block->bb_numrecs) >=
335 XFS_BMAP_BLOCK_IMINRECS(level, tcur));
336 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
340 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
345 lrecs = be16_to_cpu(left->bb_numrecs);
/* Neither sibling could lend a record; done probing, fall into merge. */
347 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
350 ASSERT(bno != NULLFSBLOCK);
/* Merge with the left sibling if the combined records fit in one block. */
351 if (lbno != NULLFSBLOCK &&
352 lrecs + be16_to_cpu(block->bb_numrecs) <= XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
356 if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, lbno, 0, &lbp,
357 XFS_BMAP_BTREE_REF))) {
358 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
361 left = XFS_BUF_TO_BMBT_BLOCK(lbp);
362 if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) {
363 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
/* Otherwise merge with the right sibling if that combination fits. */
366 } else if (rbno != NULLFSBLOCK &&
367 rrecs + be16_to_cpu(block->bb_numrecs) <=
368 XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
372 if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, rbno, 0, &rbp,
373 XFS_BMAP_BTREE_REF))) {
374 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
377 right = XFS_BUF_TO_BMBT_BLOCK(rbp);
378 if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) {
379 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
382 lrecs = be16_to_cpu(left->bb_numrecs);
384 if (level > 0 && (error = xfs_btree_decrement(cur, level, &i))) {
385 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
388 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/* Perform the merge: copy all of right's entries onto the end of left. */
392 numlrecs = be16_to_cpu(left->bb_numrecs);
393 numrrecs = be16_to_cpu(right->bb_numrecs);
395 lkp = XFS_BMAP_KEY_IADDR(left, numlrecs + 1, cur);
396 lpp = XFS_BMAP_PTR_IADDR(left, numlrecs + 1, cur);
397 rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
398 rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
400 for (i = 0; i < numrrecs; i++) {
401 if ((error = xfs_btree_check_lptr_disk(cur, rpp[i], level))) {
402 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
407 memcpy(lkp, rkp, numrrecs * sizeof(*lkp));
408 memcpy(lpp, rpp, numrrecs * sizeof(*lpp));
409 xfs_bmbt_log_keys(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
410 xfs_bmbt_log_ptrs(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
412 lrp = XFS_BMAP_REC_IADDR(left, numlrecs + 1, cur);
413 rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
414 memcpy(lrp, rrp, numrrecs * sizeof(*lrp));
415 xfs_bmbt_log_recs(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
/* Splice right out of the sibling chain; left inherits its rightsib. */
417 be16_add_cpu(&left->bb_numrecs, numrrecs);
418 left->bb_rightsib = right->bb_rightsib;
419 xfs_bmbt_log_block(cur, lbp, XFS_BB_RIGHTSIB | XFS_BB_NUMRECS);
420 if (be64_to_cpu(left->bb_rightsib) != NULLDFSBNO) {
421 if ((error = xfs_btree_read_bufl(mp, cur->bc_tp,
422 be64_to_cpu(left->bb_rightsib),
423 0, &rrbp, XFS_BMAP_BTREE_REF))) {
424 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
427 rrblock = XFS_BUF_TO_BMBT_BLOCK(rrbp);
428 if ((error = xfs_btree_check_lblock(cur, rrblock, level, rrbp))) {
429 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
432 rrblock->bb_leftsib = cpu_to_be64(lbno);
433 xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB);
/*
 * Free the emptied right block and account for it: inode block count,
 * core inode logging, quota block count, and invalidate its buffer.
 */
435 xfs_bmap_add_free(XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(rbp)), 1,
436 cur->bc_private.b.flist, mp);
437 cur->bc_private.b.ip->i_d.di_nblocks--;
438 xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
439 XFS_TRANS_MOD_DQUOT_BYINO(mp, cur->bc_tp, cur->bc_private.b.ip,
440 XFS_TRANS_DQ_BCOUNT, -1L);
441 xfs_trans_binval(cur->bc_tp, rbp);
/* Point the cursor at the surviving (left) block. */
443 cur->bc_bufs[level] = lbp;
444 cur->bc_ptrs[level] += lrecs;
445 cur->bc_ra[level] = 0;
446 } else if ((error = xfs_btree_increment(cur, level + 1, &i))) {
447 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
451 cur->bc_ptrs[level]--;
452 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/* error0: tear down the temporary cursor on the corruption/error path. */
458 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
463 * Insert one record/level. Return information to the caller
464 * allowing the next level up to proceed if necessary.
/*
 * xfs_bmbt_insrec — insert *recp at cur->bc_ptrs[level].  If the block is
 * full it first tries to grow an inode-root block, make a new root, shift
 * into a sibling, or split the block; a split hands back a new block number
 * and cursor through *curp so the caller can insert at the next level up.
 * *stat is the no-go/done/continue result for the caller.
 *
 * NOTE(review): line-sampled dump — the function-name line, the declaration
 * of `numrecs`, closing braces and return statements are missing from view.
 */
466 STATIC int /* error */
468 xfs_btree_cur_t *cur,
471 xfs_bmbt_rec_t *recp,
472 xfs_btree_cur_t **curp,
473 int *stat) /* no-go/done/continue */
475 xfs_bmbt_block_t *block; /* bmap btree block */
476 xfs_buf_t *bp; /* buffer for block */
477 int error; /* error return value */
478 int i; /* loop index */
479 xfs_bmbt_key_t key; /* bmap btree key */
480 xfs_bmbt_key_t *kp=NULL; /* pointer to bmap btree key */
481 int logflags; /* inode logging flags */
482 xfs_fsblock_t nbno; /* new block number */
483 struct xfs_btree_cur *ncur; /* new btree cursor */
484 __uint64_t startoff; /* new btree key value */
485 xfs_bmbt_rec_t nrec; /* new record count */
486 int optr; /* old key/record index */
487 xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */
488 int ptr; /* key/record index */
489 xfs_bmbt_rec_t *rp=NULL; /* pointer to bmap btree rec */
492 ASSERT(level < cur->bc_nlevels);
493 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
494 XFS_BMBT_TRACE_ARGIFR(cur, level, *bnop, recp);
/* Build the key for the new record from its startoff. */
496 key.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(recp));
497 optr = ptr = cur->bc_ptrs[level];
499 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
503 XFS_STATS_INC(xs_bmbt_insrec);
504 block = xfs_bmbt_get_block(cur, level, &bp);
505 numrecs = be16_to_cpu(block->bb_numrecs);
507 if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
508 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
/* Debug ordering checks against the entry currently at the target slot. */
511 if (ptr <= numrecs) {
513 rp = XFS_BMAP_REC_IADDR(block, ptr, cur);
514 xfs_btree_check_rec(XFS_BTNUM_BMAP, recp, rp);
516 kp = XFS_BMAP_KEY_IADDR(block, ptr, cur);
517 xfs_btree_check_key(XFS_BTNUM_BMAP, &key, kp);
/*
 * Block is full: try, in order, growing the inode root, making a new
 * root, shifting right, shifting left, and finally splitting.
 */
522 if (numrecs == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
523 if (numrecs < XFS_BMAP_BLOCK_DMAXRECS(level, cur)) {
525 * A root block, that can be made bigger.
527 xfs_iroot_realloc(cur->bc_private.b.ip, 1,
528 cur->bc_private.b.whichfork);
529 block = xfs_bmbt_get_block(cur, level, &bp);
530 } else if (level == cur->bc_nlevels - 1) {
531 if ((error = xfs_bmbt_newroot(cur, &logflags, stat)) ||
533 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
536 xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
538 block = xfs_bmbt_get_block(cur, level, &bp);
540 if ((error = xfs_btree_rshift(cur, level, &i))) {
541 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
547 if ((error = xfs_bmbt_lshift(cur, level, &i))) {
548 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
/* A shift moved our insertion point; re-read it from the cursor. */
552 optr = ptr = cur->bc_ptrs[level];
554 if ((error = xfs_bmbt_split(cur, level,
555 &nbno, &startoff, &ncur,
557 XFS_BMBT_TRACE_CURSOR(cur,
562 block = xfs_bmbt_get_block(
566 xfs_btree_check_lblock(cur,
567 block, level, bp))) {
568 XFS_BMBT_TRACE_CURSOR(
573 ptr = cur->bc_ptrs[level];
574 xfs_bmbt_disk_set_allf(&nrec,
578 XFS_BMBT_TRACE_CURSOR(cur,
/* Room now exists: open a gap at @ptr and slot the new entry in. */
587 numrecs = be16_to_cpu(block->bb_numrecs);
589 kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
590 pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
/* Interior node: validate child pointers being shifted, then move them. */
592 for (i = numrecs; i >= ptr; i--) {
593 if ((error = xfs_btree_check_lptr_disk(cur, pp[i - 1],
595 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
600 memmove(&kp[ptr], &kp[ptr - 1],
601 (numrecs - ptr + 1) * sizeof(*kp));
602 memmove(&pp[ptr], &pp[ptr - 1],
603 (numrecs - ptr + 1) * sizeof(*pp));
605 if ((error = xfs_btree_check_lptr(cur, *bnop, level))) {
606 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
611 pp[ptr - 1] = cpu_to_be64(*bnop);
613 block->bb_numrecs = cpu_to_be16(numrecs);
614 xfs_bmbt_log_keys(cur, bp, ptr, numrecs);
615 xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs);
/* Leaf: open a gap in the record array instead. */
617 rp = XFS_BMAP_REC_IADDR(block, 1, cur);
618 memmove(&rp[ptr], &rp[ptr - 1],
619 (numrecs - ptr + 1) * sizeof(*rp));
622 block->bb_numrecs = cpu_to_be16(numrecs);
623 xfs_bmbt_log_recs(cur, bp, ptr, numrecs);
625 xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS);
/* Debug: verify ordering against the preceding entry. */
629 xfs_btree_check_rec(XFS_BTNUM_BMAP, rp + ptr - 1,
632 xfs_btree_check_key(XFS_BTNUM_BMAP, kp + ptr - 1,
/* Inserting at slot 1 changes the key the parent level sees. */
636 if (optr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)&key, level + 1))) {
637 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
/* A split happened: propagate the new block/cursor to the caller. */
641 if (nbno != NULLFSBLOCK) {
645 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/*
 * xfs_bmbt_killroot (name line missing from this sampled dump): collapse
 * a single-child root by copying the lone child block's keys and pointers
 * up into the inode-root block, then freeing the child block and reducing
 * the tree height by one.
 */
652 xfs_btree_cur_t *cur)
654 xfs_bmbt_block_t *block;
655 xfs_bmbt_block_t *cblock;
669 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
670 level = cur->bc_nlevels - 1;
673 * Don't deal with the root block needs to be a leaf case.
674 * We're just going to turn the thing back into extents anyway.
677 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
680 block = xfs_bmbt_get_block(cur, level, &cbp);
682 * Give up if the root has multiple children.
684 if (be16_to_cpu(block->bb_numrecs) != 1) {
685 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
689 * Only do this if the next level will fit.
690 * Then the data must be copied up to the inode,
691 * instead of freeing the root you free the next level.
693 cbp = cur->bc_bufs[level - 1];
694 cblock = XFS_BUF_TO_BMBT_BLOCK(cbp);
695 if (be16_to_cpu(cblock->bb_numrecs) > XFS_BMAP_BLOCK_DMAXRECS(level, cur)) {
696 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/* The lone child must have no siblings if it is the only child. */
699 ASSERT(be64_to_cpu(cblock->bb_leftsib) == NULLDFSBNO);
700 ASSERT(be64_to_cpu(cblock->bb_rightsib) == NULLDFSBNO);
701 ip = cur->bc_private.b.ip;
702 ifp = XFS_IFORK_PTR(ip, cur->bc_private.b.whichfork);
703 ASSERT(XFS_BMAP_BLOCK_IMAXRECS(level, cur) ==
704 XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes));
/* Grow the inode root by the difference so the child's entries fit. */
705 i = (int)(be16_to_cpu(cblock->bb_numrecs) - XFS_BMAP_BLOCK_IMAXRECS(level, cur));
707 xfs_iroot_realloc(ip, i, cur->bc_private.b.whichfork);
708 block = ifp->if_broot;
710 be16_add_cpu(&block->bb_numrecs, i);
711 ASSERT(block->bb_numrecs == cblock->bb_numrecs);
/* Copy the child's keys, then (after validation) its child pointers. */
712 kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
713 ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur);
714 memcpy(kp, ckp, be16_to_cpu(block->bb_numrecs) * sizeof(*kp));
715 pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
716 cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur);
718 for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
719 if ((error = xfs_btree_check_lptr_disk(cur, cpp[i], level - 1))) {
720 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
725 memcpy(pp, cpp, be16_to_cpu(block->bb_numrecs) * sizeof(*pp));
/* Free the child block and account: inode nblocks, quota, buffer. */
726 xfs_bmap_add_free(XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(cbp)), 1,
727 cur->bc_private.b.flist, cur->bc_mp);
728 ip->i_d.di_nblocks--;
729 XFS_TRANS_MOD_DQUOT_BYINO(cur->bc_mp, cur->bc_tp, ip,
730 XFS_TRANS_DQ_BCOUNT, -1L);
731 xfs_trans_binval(cur->bc_tp, cbp);
732 cur->bc_bufs[level - 1] = NULL;
/* The tree is now one level shorter. */
733 be16_add_cpu(&block->bb_level, -1);
734 xfs_trans_log_inode(cur->bc_tp, ip,
735 XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
737 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
742 * Log key values from the btree block.
/*
 * xfs_bmbt_log_keys — log keys kfirst..klast of the block in @bp to the
 * transaction.  Visible code shows two paths: when a buffer is present,
 * compute the byte range covering the keys and log the buffer; otherwise
 * the keys live in the inode root, so log the inode's broot instead.
 * NOTE(review): name line, braces and the tp assignment are missing from
 * this sampled dump.
 */
746 xfs_btree_cur_t *cur,
753 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
754 XFS_BMBT_TRACE_ARGBII(cur, bp, kfirst, klast);
757 xfs_bmbt_block_t *block;
762 block = XFS_BUF_TO_BMBT_BLOCK(bp);
763 kp = XFS_BMAP_KEY_DADDR(block, 1, cur);
/* Byte offsets of the first/last logged key relative to the block. */
764 first = (int)((xfs_caddr_t)&kp[kfirst - 1] - (xfs_caddr_t)block);
765 last = (int)(((xfs_caddr_t)&kp[klast] - 1) - (xfs_caddr_t)block);
766 xfs_trans_log_buf(tp, bp, first, last);
/* No buffer: the block is the inode root — log the broot fork. */
770 ip = cur->bc_private.b.ip;
771 xfs_trans_log_inode(tp, ip,
772 XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
774 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
778 * Log pointer values from the btree block.
/*
 * xfs_bmbt_log_ptrs — log child pointers pfirst..plast of the block in
 * @bp, mirroring xfs_bmbt_log_keys: buffer-backed blocks log the byte
 * range in the buffer, the inode-root block logs the broot fork.
 * NOTE(review): name line, braces and the tp assignment are missing from
 * this sampled dump.
 */
782 xfs_btree_cur_t *cur,
789 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
790 XFS_BMBT_TRACE_ARGBII(cur, bp, pfirst, plast);
793 xfs_bmbt_block_t *block;
798 block = XFS_BUF_TO_BMBT_BLOCK(bp);
799 pp = XFS_BMAP_PTR_DADDR(block, 1, cur);
/* Byte offsets of the first/last logged pointer within the block. */
800 first = (int)((xfs_caddr_t)&pp[pfirst - 1] - (xfs_caddr_t)block);
801 last = (int)(((xfs_caddr_t)&pp[plast] - 1) - (xfs_caddr_t)block);
802 xfs_trans_log_buf(tp, bp, first, last);
/* No buffer: inode-root block — log the broot fork. */
806 ip = cur->bc_private.b.ip;
807 xfs_trans_log_inode(tp, ip,
808 XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
810 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
814 * Move 1 record left from cur/level if possible.
815 * Update cur to reflect the new path.
/*
 * xfs_bmbt_lshift — move the first entry of the current (right) block into
 * the last slot of its left sibling, if the left sibling has room.  Updates
 * the parent key for the right block and decrements cur->bc_ptrs[level].
 * *stat reports success/failure.
 *
 * NOTE(review): line-sampled dump — the function-name line, several closing
 * braces, the actual key/ptr/rec copy statements between the IADDR lookups
 * and the log calls, and the returns are missing from view.
 */
817 STATIC int /* error */
819 xfs_btree_cur_t *cur,
821 int *stat) /* success/failure */
823 int error; /* error return value */
825 int i; /* loop counter */
827 xfs_bmbt_key_t key; /* bmap btree key */
828 xfs_buf_t *lbp; /* left buffer pointer */
829 xfs_bmbt_block_t *left; /* left btree block */
830 xfs_bmbt_key_t *lkp=NULL; /* left btree key */
831 xfs_bmbt_ptr_t *lpp; /* left address pointer */
832 int lrecs; /* left record count */
833 xfs_bmbt_rec_t *lrp=NULL; /* left record pointer */
834 xfs_mount_t *mp; /* file system mount point */
835 xfs_buf_t *rbp; /* right buffer pointer */
836 xfs_bmbt_block_t *right; /* right btree block */
837 xfs_bmbt_key_t *rkp=NULL; /* right btree key */
838 xfs_bmbt_ptr_t *rpp=NULL; /* right address pointer */
839 xfs_bmbt_rec_t *rrp=NULL; /* right record pointer */
840 int rrecs; /* right record count */
842 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
843 XFS_BMBT_TRACE_ARGI(cur, level);
/* Can't shift at the root: there is no sibling. */
844 if (level == cur->bc_nlevels - 1) {
845 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
849 rbp = cur->bc_bufs[level];
850 right = XFS_BUF_TO_BMBT_BLOCK(rbp);
852 if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) {
853 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
/* No left sibling, or cursor is on the first entry: nothing to shift. */
857 if (be64_to_cpu(right->bb_leftsib) == NULLDFSBNO) {
858 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
862 if (cur->bc_ptrs[level] <= 1) {
863 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
868 if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, be64_to_cpu(right->bb_leftsib), 0,
869 &lbp, XFS_BMAP_BTREE_REF))) {
870 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
873 left = XFS_BUF_TO_BMBT_BLOCK(lbp);
874 if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) {
875 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
/* Left sibling full: cannot accept another entry. */
878 if (be16_to_cpu(left->bb_numrecs) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
879 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/* lrecs is the left block's new count after receiving one entry. */
883 lrecs = be16_to_cpu(left->bb_numrecs) + 1;
/* Interior node: move one key+ptr; leaf: move one record (below). */
885 lkp = XFS_BMAP_KEY_IADDR(left, lrecs, cur);
886 rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
888 xfs_bmbt_log_keys(cur, lbp, lrecs, lrecs);
889 lpp = XFS_BMAP_PTR_IADDR(left, lrecs, cur);
890 rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
892 if ((error = xfs_btree_check_lptr_disk(cur, *rpp, level))) {
893 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
898 xfs_bmbt_log_ptrs(cur, lbp, lrecs, lrecs);
900 lrp = XFS_BMAP_REC_IADDR(left, lrecs, cur);
901 rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
903 xfs_bmbt_log_recs(cur, lbp, lrecs, lrecs);
905 left->bb_numrecs = cpu_to_be16(lrecs);
906 xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS);
/* Debug: the appended entry must still be in order on the left. */
909 xfs_btree_check_key(XFS_BTNUM_BMAP, lkp - 1, lkp);
911 xfs_btree_check_rec(XFS_BTNUM_BMAP, lrp - 1, lrp);
/* Close the gap in the right block and log the shrunken ranges. */
913 rrecs = be16_to_cpu(right->bb_numrecs) - 1;
914 right->bb_numrecs = cpu_to_be16(rrecs);
915 xfs_bmbt_log_block(cur, rbp, XFS_BB_NUMRECS);
918 for (i = 0; i < rrecs; i++) {
919 if ((error = xfs_btree_check_lptr_disk(cur, rpp[i + 1],
921 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
926 memmove(rkp, rkp + 1, rrecs * sizeof(*rkp));
927 memmove(rpp, rpp + 1, rrecs * sizeof(*rpp));
928 xfs_bmbt_log_keys(cur, rbp, 1, rrecs);
929 xfs_bmbt_log_ptrs(cur, rbp, 1, rrecs);
931 memmove(rrp, rrp + 1, rrecs * sizeof(*rrp));
932 xfs_bmbt_log_recs(cur, rbp, 1, rrecs);
/* The right block's first key changed: tell the parent level. */
933 key.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(rrp));
936 if ((error = xfs_btree_updkey(cur, (union xfs_btree_key *)rkp, level + 1))) {
937 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
/* Cursor entry moved one slot left along with the shifted record. */
940 cur->bc_ptrs[level]--;
941 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
947 * Determine the extent state.
/*
 * NOTE(review): only a fragment of this function is visible — the
 * signature and the other return path are missing from the sampled dump.
 * The visible path asserts a nonzero block count and reports the extent
 * as unwritten.
 */
956 ASSERT(blks != 0); /* saved for DMIG */
957 return XFS_EXT_UNWRITTEN;
964 * Split cur/level block in half.
965 * Return new block number and its first record (to be inserted into parent).
/*
 * xfs_bmbt_split — allocate a new (right) block, move the upper half of the
 * current (left) block's entries into it, link it into the sibling chain,
 * and return its block number in *bnop and its first key in *startoff.  If
 * there is a parent level, a duplicated cursor pointing past the split is
 * returned through *curp.  *stat reports success/failure.
 *
 * NOTE(review): line-sampled dump — the function-name line, the *bnop
 * parameter line, several braces/returns and the "*bnop = args.fsbno"
 * style assignment are missing from view.
 */
967 STATIC int /* error */
969 xfs_btree_cur_t *cur,
972 __uint64_t *startoff,
973 xfs_btree_cur_t **curp,
974 int *stat) /* success/failure */
976 xfs_alloc_arg_t args; /* block allocation args */
977 int error; /* error return value */
978 int i; /* loop counter */
979 xfs_fsblock_t lbno; /* left sibling block number */
980 xfs_buf_t *lbp; /* left buffer pointer */
981 xfs_bmbt_block_t *left; /* left btree block */
982 xfs_bmbt_key_t *lkp; /* left btree key */
983 xfs_bmbt_ptr_t *lpp; /* left address pointer */
984 xfs_bmbt_rec_t *lrp; /* left record pointer */
985 xfs_buf_t *rbp; /* right buffer pointer */
986 xfs_bmbt_block_t *right; /* right btree block */
987 xfs_bmbt_key_t *rkp; /* right btree key */
988 xfs_bmbt_ptr_t *rpp; /* right address pointer */
989 xfs_bmbt_block_t *rrblock; /* right-right btree block */
990 xfs_buf_t *rrbp; /* right-right buffer pointer */
991 xfs_bmbt_rec_t *rrp; /* right record pointer */
993 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
994 // disable until merged into common code
995 // XFS_BMBT_TRACE_ARGIFK(cur, level, *bnop, *startoff);
/* Set up the allocation request for the new right block. */
996 args.tp = cur->bc_tp;
997 args.mp = cur->bc_mp;
998 lbp = cur->bc_bufs[level];
999 lbno = XFS_DADDR_TO_FSB(args.mp, XFS_BUF_ADDR(lbp));
1000 left = XFS_BUF_TO_BMBT_BLOCK(lbp);
1001 args.fsbno = cur->bc_private.b.firstblock;
1002 args.firstblock = args.fsbno;
1004 if (args.fsbno == NULLFSBLOCK) {
1006 args.type = XFS_ALLOCTYPE_START_BNO;
1008 * Make sure there is sufficient room left in the AG to
1009 * complete a full tree split for an extent insert. If
1010 * we are converting the middle part of an extent then
1011 * we may need space for two tree splits.
1013 * We are relying on the caller to make the correct block
1014 * reservation for this operation to succeed. If the
1015 * reservation amount is insufficient then we may fail a
1016 * block allocation here and corrupt the filesystem.
1018 args.minleft = xfs_trans_get_block_res(args.tp);
1019 } else if (cur->bc_private.b.flist->xbf_low)
1020 args.type = XFS_ALLOCTYPE_START_BNO;
1022 args.type = XFS_ALLOCTYPE_NEAR_BNO;
1023 args.mod = args.alignment = args.total = args.isfl =
1024 args.userdata = args.minalignslop = 0;
1025 args.minlen = args.maxlen = args.prod = 1;
1026 args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
/* No delayed-alloc credit and no block reservation: refuse up front. */
1027 if (!args.wasdel && xfs_trans_get_block_res(args.tp) == 0) {
1028 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1029 return XFS_ERROR(ENOSPC);
1031 if ((error = xfs_alloc_vextent(&args))) {
1032 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1035 if (args.fsbno == NULLFSBLOCK && args.minleft) {
1037 * Could not find an AG with enough free space to satisfy
1038 * a full btree split. Try again without minleft and if
1039 * successful activate the lowspace algorithm.
1042 args.type = XFS_ALLOCTYPE_FIRST_AG;
1044 if ((error = xfs_alloc_vextent(&args))) {
1045 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1048 cur->bc_private.b.flist->xbf_low = 1;
1050 if (args.fsbno == NULLFSBLOCK) {
1051 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/* Got exactly one block; account for it against inode and quota. */
1055 ASSERT(args.len == 1);
1056 cur->bc_private.b.firstblock = args.fsbno;
1057 cur->bc_private.b.allocated++;
1058 cur->bc_private.b.ip->i_d.di_nblocks++;
1059 xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
1060 XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip,
1061 XFS_TRANS_DQ_BCOUNT, 1L);
1062 rbp = xfs_btree_get_bufl(args.mp, args.tp, args.fsbno, 0);
1063 right = XFS_BUF_TO_BMBT_BLOCK(rbp);
1065 if ((error = xfs_btree_check_lblock(cur, left, level, rbp))) {
1066 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
/* Initialize the new block header; it takes the upper half of left. */
1070 right->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
1071 right->bb_level = left->bb_level;
1072 right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2);
/* Give the extra entry to whichever half the cursor will land in. */
1073 if ((be16_to_cpu(left->bb_numrecs) & 1) &&
1074 cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1)
1075 be16_add_cpu(&right->bb_numrecs, 1);
/* i = index of the first entry that moves to the right block. */
1076 i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1;
1078 lkp = XFS_BMAP_KEY_IADDR(left, i, cur);
1079 lpp = XFS_BMAP_PTR_IADDR(left, i, cur);
1080 rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
1081 rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
1083 for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) {
1084 if ((error = xfs_btree_check_lptr_disk(cur, lpp[i], level))) {
1085 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1090 memcpy(rkp, lkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
1091 memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
1092 xfs_bmbt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
1093 xfs_bmbt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
/* Interior node: the parent key comes from the first moved key. */
1094 *startoff = be64_to_cpu(rkp->br_startoff);
/* Leaf: move records instead; parent key comes from the first record. */
1096 lrp = XFS_BMAP_REC_IADDR(left, i, cur);
1097 rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
1098 memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
1099 xfs_bmbt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
1100 *startoff = xfs_bmbt_disk_get_startoff(rrp);
/* Link the new block into the sibling chain between left and its old
 * right neighbor. */
1102 be16_add_cpu(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs)));
1103 right->bb_rightsib = left->bb_rightsib;
1104 left->bb_rightsib = cpu_to_be64(args.fsbno);
1105 right->bb_leftsib = cpu_to_be64(lbno);
1106 xfs_bmbt_log_block(cur, rbp, XFS_BB_ALL_BITS);
1107 xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
/* Fix the old right neighbor's back-pointer, if there is one. */
1108 if (be64_to_cpu(right->bb_rightsib) != NULLDFSBNO) {
1109 if ((error = xfs_btree_read_bufl(args.mp, args.tp,
1110 be64_to_cpu(right->bb_rightsib), 0, &rrbp,
1111 XFS_BMAP_BTREE_REF))) {
1112 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1115 rrblock = XFS_BUF_TO_BMBT_BLOCK(rrbp);
1116 if ((error = xfs_btree_check_lblock(cur, rrblock, level, rrbp))) {
1117 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1120 rrblock->bb_leftsib = cpu_to_be64(args.fsbno);
1121 xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB);
/* If the cursor's target moved to the new block, follow it there. */
1123 if (cur->bc_ptrs[level] > be16_to_cpu(left->bb_numrecs) + 1) {
1124 xfs_btree_setbuf(cur, level, rbp);
1125 cur->bc_ptrs[level] -= be16_to_cpu(left->bb_numrecs);
/* Hand the caller a duplicate cursor for the parent-level insert. */
1127 if (level + 1 < cur->bc_nlevels) {
1128 if ((error = xfs_btree_dup_cursor(cur, curp))) {
1129 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1132 (*curp)->bc_ptrs[level + 1]++;
1135 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1141 * Convert on-disk form of btree root to in-memory form.
/*
 * xfs_bmdr_to_bmbt — copy a packed on-disk root (@dblock, @dblocklen bytes)
 * into an in-memory broot block (@rblock, @rblocklen bytes): rebuild the
 * header (magic, level, numrecs, NULL sibling pointers) and copy the key
 * and pointer arrays across the two layouts.
 * NOTE(review): the name line and some parameter lines are missing from
 * this sampled dump.
 */
1145 xfs_bmdr_block_t *dblock,
1147 xfs_bmbt_block_t *rblock,
1151 xfs_bmbt_key_t *fkp;
1153 xfs_bmbt_key_t *tkp;
1156 rblock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
1157 rblock->bb_level = dblock->bb_level;
/* Only non-leaf roots are converted this way. */
1158 ASSERT(be16_to_cpu(rblock->bb_level) > 0);
1159 rblock->bb_numrecs = dblock->bb_numrecs;
/* An inode-root block never has siblings. */
1160 rblock->bb_leftsib = cpu_to_be64(NULLDFSBNO);
1161 rblock->bb_rightsib = cpu_to_be64(NULLDFSBNO);
/* dmxr: max entries the on-disk root can hold, used to locate its
 * pointer array; then reused as the actual entry count for the copies. */
1162 dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0);
1163 fkp = XFS_BTREE_KEY_ADDR(xfs_bmdr, dblock, 1);
1164 tkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen);
1165 fpp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dblock, 1, dmxr);
1166 tpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
1167 dmxr = be16_to_cpu(dblock->bb_numrecs);
1168 memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
1169 memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
1173 * Delete the record pointed to by cur.
/*
 * xfs_bmbt_delete — public entry for record deletion: call xfs_bmbt_delrec
 * at each level while it asks to continue (i == 2), then walk up the tree
 * moving the cursor back one entry at any level whose ptr went to 0.
 * NOTE(review): name line and returns missing from this sampled dump.
 */
1177 xfs_btree_cur_t *cur,
1178 int *stat) /* success/failure */
1180 int error; /* error return value */
1184 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
/* i == 2 is delrec's "continue at the next level up" status. */
1185 for (level = 0, i = 2; i == 2; level++) {
1186 if ((error = xfs_bmbt_delrec(cur, level, &i))) {
1187 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
/* Repair cursor positions that were pushed off the front of a block. */
1192 for (level = 1; level < cur->bc_nlevels; level++) {
1193 if (cur->bc_ptrs[level] == 0) {
1194 if ((error = xfs_btree_decrement(cur, level,
1196 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1203 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/*
 * __xfs_bmbt_get_all -- unpack the two 64-bit words of a bmbt extent
 * record (l0/l1, host-endian) into an xfs_bmbt_irec.
 *
 * Visible bit layout: l0 bit 63 = extent (unwritten) flag, l0 bits 62..9 =
 * startoff, l0 bits 8..0 = high 9 bits of startblock; l1 bits 63..21 = low
 * 43 bits of startblock, l1 bits 20..0 = blockcount.
 * (Extract is non-contiguous; the #if XFS_BIG_BLKNOS arms are partly elided.)
 */
 1209	 * Convert a compressed bmap extent record to an uncompressed form.
 1210	 * This code must be in sync with the routines xfs_bmbt_get_startoff,
 1211	 * xfs_bmbt_get_startblock, xfs_bmbt_get_blockcount and xfs_bmbt_get_state.
 1223	ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN));
 1224	s->br_startoff = ((xfs_fileoff_t)l0 &
 1225	 XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
	/* Big-blkno build: splice the 9 high startblock bits above l1's 43. */
 1227	s->br_startblock = (((xfs_fsblock_t)l0 & XFS_MASK64LO(9)) << 43) |
 1228	 (((xfs_fsblock_t)l1) >> 21);
	/* Small-blkno build: a >32-bit value must be a NULL-startblock encoding. */
 1234	b = (((xfs_dfsbno_t)l0 & XFS_MASK64LO(9)) << 43) |
 1235	 (((xfs_dfsbno_t)l1) >> 21);
 1236	ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
 1237	s->br_startblock = (xfs_fsblock_t)b;
 1240	s->br_startblock = (xfs_fsblock_t)(((xfs_dfsbno_t)l1) >> 21);
 1242	#endif	/* XFS_BIG_BLKNOS */
 1243	s->br_blockcount = (xfs_filblks_t)(l1 & XFS_MASK64LO(21));
 1244	/* This is xfs_extent_state() in-line */
 1246	ASSERT(s->br_blockcount != 0);	/* saved for DMIG */
 1247	st = XFS_EXT_UNWRITTEN;
/* xfs_bmbt_get_all -- unpack a host-endian record via __xfs_bmbt_get_all. */
 1255	xfs_bmbt_rec_host_t *r,
 1258	__xfs_bmbt_get_all(r->l0, r->l1, s);
/*
 * xfs_bmbt_get_block -- return the bmbt block for a cursor level and, for
 * non-root levels, the buffer it lives in.
 */
 1262	 * Get the block pointer for the given level of the cursor.
 1263	 * Fill in the buffer pointer, if applicable.
 1267	xfs_btree_cur_t	*cur,
 1272	xfs_bmbt_block_t *rval;
	/* Non-root levels are backed by a buffer held in the cursor. */
 1274	if (level < cur->bc_nlevels - 1) {
 1275	*bpp = cur->bc_bufs[level];
 1276	rval = XFS_BUF_TO_BMBT_BLOCK(*bpp);
	/* Root level: the block is the inode fork's in-core broot. */
 1279	ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
 1280	 cur->bc_private.b.whichfork);
 1281	rval = ifp->if_broot;
/* Low 21 bits of l1 hold the extent's block count. */
 1287	 * Extract the blockcount field from an in memory bmap extent record.
 1290	xfs_bmbt_get_blockcount(
 1291	xfs_bmbt_rec_host_t	*r)
 1293	return (xfs_filblks_t)(r->l1 & XFS_MASK64LO(21));
/*
 * xfs_bmbt_get_startblock -- reassemble the 52-bit startblock from the low
 * 9 bits of l0 and the high 43 bits of l1.  (Extract is non-contiguous:
 * the #if XFS_BIG_BLKNOS selectors between the arms are elided.)
 */
 1297	 * Extract the startblock field from an in memory bmap extent record.
 1300	xfs_bmbt_get_startblock(
 1301	xfs_bmbt_rec_host_t	*r)
 1304	return (((xfs_fsblock_t)r->l0 & XFS_MASK64LO(9)) << 43) |
 1305	 (((xfs_fsblock_t)r->l1) >> 21);
	/* Small-blkno build: anything over 32 bits must be a NULL encoding. */
 1310	b = (((xfs_dfsbno_t)r->l0 & XFS_MASK64LO(9)) << 43) |
 1311	 (((xfs_dfsbno_t)r->l1) >> 21);
 1312	ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
 1313	return (xfs_fsblock_t)b;
 1315	return (xfs_fsblock_t)(((xfs_dfsbno_t)r->l1) >> 21);
 1317	#endif	/* XFS_BIG_BLKNOS */
/* startoff lives in l0 below the extent flag, shifted up 9 bits. */
 1321	 * Extract the startoff field from an in memory bmap extent record.
 1324	xfs_bmbt_get_startoff(
 1325	xfs_bmbt_rec_host_t	*r)
 1327	return ((xfs_fileoff_t)r->l0 &
 1328	 XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
/*
 * xfs_bmbt_get_state -- derive the extent state from the top-of-l0 extent
 * flag plus the blockcount (delegated to xfs_extent_state()).
 */
 1333	xfs_bmbt_rec_host_t	*r)
 1337	ext_flag = (int)((r->l0) >> (64 - BMBT_EXNTFLAG_BITLEN));
 1338	return xfs_extent_state(xfs_bmbt_get_blockcount(r),
/* On-disk records are big-endian; convert both words, then unpack. */
 1342	/* Endian flipping versions of the bmbt extraction functions */
 1344	xfs_bmbt_disk_get_all(
 1348	__xfs_bmbt_get_all(be64_to_cpu(r->l0), be64_to_cpu(r->l1), s);
/* Same mask as the in-memory variant, applied after byte-swapping l1. */
 1352	 * Extract the blockcount field from an on disk bmap extent record.
 1355	xfs_bmbt_disk_get_blockcount(
 1358	return (xfs_filblks_t)(be64_to_cpu(r->l1) & XFS_MASK64LO(21));
/* Same extraction as xfs_bmbt_get_startoff, applied after byte-swapping l0. */
 1362	 * Extract the startoff field from a disk format bmap extent record.
 1365	xfs_bmbt_disk_get_startoff(
 1368	return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
 1369	 XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
/*
 * xfs_bmbt_insert -- insert cur->bc_rec.b into the tree, splitting blocks
 * upward as needed.  NOTE(review): extract is non-contiguous; the do-loop
 * header and several cleanup lines are elided.
 */
 1373	 * Insert the current record at the point referenced by cur.
 1375	 * A multi-level split of the tree on insert will invalidate the original
 1376	 * cursor.  All callers of this function should assume that the cursor is
 1377	 * no longer valid and revalidate it.
 1381	xfs_btree_cur_t	*cur,
 1382	int	*stat)	/* success/failure */
 1384	int	error;	/* error return value */
 1388	xfs_btree_cur_t	*ncur;	/* cursor returned by a split, if any */
 1389	xfs_bmbt_rec_t	nrec;	/* disk-format copy of the record to insert */
 1390	xfs_btree_cur_t	*pcur;	/* cursor currently being worked */
 1392	XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
	/* Pack the in-core record into on-disk (big-endian) form once. */
 1395	xfs_bmbt_disk_set_all(&nrec, &cur->bc_rec.b);
 1399	if ((error = xfs_bmbt_insrec(pcur, level++, &nbno, &nrec, &ncur,
 1402	xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
 1403	XFS_BMBT_TRACE_CURSOR(cur, ERROR);
 1406	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	/*
	 * When a split produced a temporary cursor and we are done with it,
	 * fold its state (levels, allocated count, firstblock) back into the
	 * caller's cursor before freeing it.
	 */
 1407	if (pcur != cur && (ncur || nbno == NULLFSBLOCK)) {
 1408	cur->bc_nlevels = pcur->bc_nlevels;
 1409	cur->bc_private.b.allocated +=
 1410	 pcur->bc_private.b.allocated;
 1411	pcur->bc_private.b.allocated = 0;
 1412	ASSERT((cur->bc_private.b.firstblock != NULLFSBLOCK) ||
 1413	 XFS_IS_REALTIME_INODE(cur->bc_private.b.ip));
 1414	cur->bc_private.b.firstblock =
 1415	 pcur->bc_private.b.firstblock;
 1416	ASSERT(cur->bc_private.b.flist ==
 1417	 pcur->bc_private.b.flist);
 1418	xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
	/* nbno != NULLFSBLOCK means a split left work at the next level up. */
 1424	} while (nbno != NULLFSBLOCK);
 1425	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
 1429	XFS_BMBT_TRACE_CURSOR(cur, ERROR);
/*
 * xfs_bmbt_log_block -- log the requested header fields of a btree block.
 * For buffer-backed blocks the byte range is logged via the transaction;
 * otherwise (root-in-inode path, selector elided here) the inode fork
 * broot is logged instead.
 */
 1434	 * Log fields from the btree block header.
 1438	xfs_btree_cur_t	*cur,
	/* Field-number -> byte-offset table consumed by xfs_btree_offsets(). */
 1445	static const short	offsets[] = {
 1446	offsetof(xfs_bmbt_block_t, bb_magic),
 1447	offsetof(xfs_bmbt_block_t, bb_level),
 1448	offsetof(xfs_bmbt_block_t, bb_numrecs),
 1449	offsetof(xfs_bmbt_block_t, bb_leftsib),
 1450	offsetof(xfs_bmbt_block_t, bb_rightsib),
 1451	sizeof(xfs_bmbt_block_t)
 1454	XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
 1455	XFS_BMBT_TRACE_ARGBI(cur, bp, fields);
 1458	xfs_btree_offsets(fields, offsets, XFS_BB_NUM_BITS, &first,
 1460	xfs_trans_log_buf(tp, bp, first, last);
 1462	xfs_trans_log_inode(tp, cur->bc_private.b.ip,
 1463	 XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
 1464	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/*
 * xfs_bmbt_log_recs -- log records [rfirst, rlast] of a leaf block by
 * computing their byte span relative to the block start.
 */
 1468	 * Log record values from the btree block.
 1472	xfs_btree_cur_t	*cur,
 1477	xfs_bmbt_block_t	*block;
 1483	XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
 1484	XFS_BMBT_TRACE_ARGBII(cur, bp, rfirst, rlast);
 1487	block = XFS_BUF_TO_BMBT_BLOCK(bp);
 1488	rp = XFS_BMAP_REC_DADDR(block, 1, cur);
	/* Record indices are 1-based, hence the rfirst - 1 / rlast adjustment. */
 1489	first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block);
 1490	last = (int)(((xfs_caddr_t)&rp[rlast] - 1) - (xfs_caddr_t)block);
 1491	xfs_trans_log_buf(tp, bp, first, last);
 1492	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/*
 * xfs_bmbt_newroot -- grow the tree by one level: allocate a real block,
 * copy the inode-rooted broot contents into it, shrink the broot to a
 * single key/pointer referencing the new child, and log everything.
 * NOTE(review): extract is non-contiguous; error-goto targets and some
 * selector lines are elided.
 */
 1496	 * Give the bmap btree a new root block.  Copy the old broot contents
 1497	 * down into a real block and make the broot point to it.
 1501	xfs_btree_cur_t	*cur,	/* btree cursor */
 1502	int	*logflags,	/* logging flags for inode */
 1503	int	*stat)	/* return status - 0 fail */
 1505	xfs_alloc_arg_t	args;	/* allocation arguments */
 1506	xfs_bmbt_block_t	*block;	/* bmap btree block */
 1507	xfs_buf_t	*bp;	/* buffer for block */
 1508	xfs_bmbt_block_t	*cblock;	/* child btree block */
 1509	xfs_bmbt_key_t	*ckp;	/* child key pointer */
 1510	xfs_bmbt_ptr_t	*cpp;	/* child ptr pointer */
 1511	int	error;	/* error return code */
 1513	int	i;	/* loop counter */
 1515	xfs_bmbt_key_t	*kp;	/* pointer to bmap btree key */
 1516	int	level;	/* btree level */
 1517	xfs_bmbt_ptr_t	*pp;	/* pointer to bmap block addr */
 1519	XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
 1520	level = cur->bc_nlevels - 1;
 1521	block = xfs_bmbt_get_block(cur, level, &bp);
 1523	 * Copy the root into a real block.
 1525	args.mp = cur->bc_mp;
 1526	pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
 1527	args.tp = cur->bc_tp;
 1528	args.fsbno = cur->bc_private.b.firstblock;
	/* Single-block allocation: every tuning knob zeroed, len forced to 1. */
 1529	args.mod = args.minleft = args.alignment = args.total = args.isfl =
 1530	 args.userdata = args.minalignslop = 0;
 1531	args.minlen = args.maxlen = args.prod = 1;
 1532	args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
 1533	args.firstblock = args.fsbno;
	/*
	 * No firstblock yet: seed the allocation target from the root's
	 * first child pointer (validated first).
	 */
 1534	if (args.fsbno == NULLFSBLOCK) {
 1536	if ((error = xfs_btree_check_lptr_disk(cur, *pp, level))) {
 1537	XFS_BMBT_TRACE_CURSOR(cur, ERROR);
 1541	args.fsbno = be64_to_cpu(*pp);
 1542	args.type = XFS_ALLOCTYPE_START_BNO;
 1543	} else if (cur->bc_private.b.flist->xbf_low)
 1544	args.type = XFS_ALLOCTYPE_START_BNO;
 1546	args.type = XFS_ALLOCTYPE_NEAR_BNO;
 1547	if ((error = xfs_alloc_vextent(&args))) {
 1548	XFS_BMBT_TRACE_CURSOR(cur, ERROR);
	/* Allocation failure is reported through *stat, not as an error. */
 1551	if (args.fsbno == NULLFSBLOCK) {
 1552	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
 1556	ASSERT(args.len == 1);
 1557	cur->bc_private.b.firstblock = args.fsbno;
 1558	cur->bc_private.b.allocated++;
 1559	cur->bc_private.b.ip->i_d.di_nblocks++;
	/* Charge the new block against the inode's block-count quota. */
 1560	XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip,
 1561	 XFS_TRANS_DQ_BCOUNT, 1L);
 1562	bp = xfs_btree_get_bufl(args.mp, cur->bc_tp, args.fsbno, 0);
 1563	cblock = XFS_BUF_TO_BMBT_BLOCK(bp);
	/* Root gains a level and shrinks to one record (the new child). */
 1565	be16_add_cpu(&block->bb_level, 1);
 1566	block->bb_numrecs = cpu_to_be16(1);
 1568	cur->bc_ptrs[level + 1] = 1;
 1569	kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
 1570	ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur);
 1571	memcpy(ckp, kp, be16_to_cpu(cblock->bb_numrecs) * sizeof(*kp));
 1572	cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur);
	/* Validate every child pointer before copying them down. */
 1574	for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
 1575	if ((error = xfs_btree_check_lptr_disk(cur, pp[i], level))) {
 1576	XFS_BMBT_TRACE_CURSOR(cur, ERROR);
 1581	memcpy(cpp, pp, be16_to_cpu(cblock->bb_numrecs) * sizeof(*pp));
 1583	if ((error = xfs_btree_check_lptr(cur, args.fsbno, level))) {
 1584	XFS_BMBT_TRACE_CURSOR(cur, ERROR);
 1588	*pp = cpu_to_be64(args.fsbno);
	/* Shrink the broot to the single remaining record. */
 1589	xfs_iroot_realloc(cur->bc_private.b.ip, 1 - be16_to_cpu(cblock->bb_numrecs),
 1590	 cur->bc_private.b.whichfork);
 1591	xfs_btree_setbuf(cur, level, bp);
 1593	 * Do all this logging at the end so that
 1594	 * the root is at the right level.
 1596	xfs_bmbt_log_block(cur, bp, XFS_BB_ALL_BITS);
 1597	xfs_bmbt_log_keys(cur, bp, 1, be16_to_cpu(cblock->bb_numrecs));
 1598	xfs_bmbt_log_ptrs(cur, bp, 1, be16_to_cpu(cblock->bb_numrecs));
 1599	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
 1601	 XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork);
/*
 * xfs_bmbt_set_allf -- pack startoff/startblock/blockcount/state into the
 * two host-endian words of a bmbt extent record (inverse of
 * __xfs_bmbt_get_all; same bit layout).
 */
 1607	 * Set all the fields in a bmap extent record from the arguments.
 1611	xfs_bmbt_rec_host_t	*r,
 1612	xfs_fileoff_t	startoff,
 1613	xfs_fsblock_t	startblock,
 1614	xfs_filblks_t	blockcount,
 1617	int	extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;
 1619	ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
 1620	ASSERT((startoff & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0);
 1621	ASSERT((blockcount & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
 1624	ASSERT((startblock & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0);
	/* Big-blkno build: high 9 startblock bits land at the bottom of l0. */
 1626	r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
 1627	 ((xfs_bmbt_rec_base_t)startoff << 9) |
 1628	 ((xfs_bmbt_rec_base_t)startblock >> 43);
 1629	r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
 1630	 ((xfs_bmbt_rec_base_t)blockcount &
 1631	 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
 1632	#else	/* !XFS_BIG_BLKNOS */
	/* NULL startblock is encoded with all-ones marker bits in l0/l1. */
 1633	if (ISNULLSTARTBLOCK(startblock)) {
 1634	r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
 1635	 ((xfs_bmbt_rec_base_t)startoff << 9) |
 1636	 (xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
 1637	r->l1 = XFS_MASK64HI(11) |
 1638	 ((xfs_bmbt_rec_base_t)startblock << 21) |
 1639	 ((xfs_bmbt_rec_base_t)blockcount &
 1640	 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
 1642	r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
 1643	 ((xfs_bmbt_rec_base_t)startoff << 9);
 1644	r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
 1645	 ((xfs_bmbt_rec_base_t)blockcount &
 1646	 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
 1648	#endif	/* XFS_BIG_BLKNOS */
/* Convenience wrapper: pack an xfs_bmbt_irec via xfs_bmbt_set_allf. */
 1652	 * Set all the fields in a bmap extent record from the uncompressed form.
 1656	xfs_bmbt_rec_host_t *r,
 1659	xfs_bmbt_set_allf(r, s->br_startoff, s->br_startblock,
 1660	 s->br_blockcount, s->br_state);
/*
 * xfs_bmbt_disk_set_allf -- same packing as xfs_bmbt_set_allf, but each
 * word is byte-swapped to big-endian (on-disk) form as it is stored.
 */
 1665	 * Set all the fields in a disk format bmap extent record from the arguments.
 1668	xfs_bmbt_disk_set_allf(
 1670	xfs_fileoff_t	startoff,
 1671	xfs_fsblock_t	startblock,
 1672	xfs_filblks_t	blockcount,
 1675	int	extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;
 1677	ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
 1678	ASSERT((startoff & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0);
 1679	ASSERT((blockcount & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
 1682	ASSERT((startblock & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0);
 1684	r->l0 = cpu_to_be64(
 1685	 ((xfs_bmbt_rec_base_t)extent_flag << 63) |
 1686	 ((xfs_bmbt_rec_base_t)startoff << 9) |
 1687	 ((xfs_bmbt_rec_base_t)startblock >> 43));
 1688	r->l1 = cpu_to_be64(
 1689	 ((xfs_bmbt_rec_base_t)startblock << 21) |
 1690	 ((xfs_bmbt_rec_base_t)blockcount &
 1691	 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
 1692	#else	/* !XFS_BIG_BLKNOS */
	/* NULL startblock keeps the same marker-bit encoding as the host form. */
 1693	if (ISNULLSTARTBLOCK(startblock)) {
 1694	r->l0 = cpu_to_be64(
 1695	 ((xfs_bmbt_rec_base_t)extent_flag << 63) |
 1696	 ((xfs_bmbt_rec_base_t)startoff << 9) |
 1697	 (xfs_bmbt_rec_base_t)XFS_MASK64LO(9));
 1698	r->l1 = cpu_to_be64(XFS_MASK64HI(11) |
 1699	 ((xfs_bmbt_rec_base_t)startblock << 21) |
 1700	 ((xfs_bmbt_rec_base_t)blockcount &
 1701	 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
 1703	r->l0 = cpu_to_be64(
 1704	 ((xfs_bmbt_rec_base_t)extent_flag << 63) |
 1705	 ((xfs_bmbt_rec_base_t)startoff << 9));
 1706	r->l1 = cpu_to_be64(
 1707	 ((xfs_bmbt_rec_base_t)startblock << 21) |
 1708	 ((xfs_bmbt_rec_base_t)blockcount &
 1709	 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
 1711	#endif	/* XFS_BIG_BLKNOS */
/* Convenience wrapper: pack an xfs_bmbt_irec into disk (big-endian) form. */
 1715	 * Set all the fields in a bmap extent record from the uncompressed form.
 1718	xfs_bmbt_disk_set_all(
 1722	xfs_bmbt_disk_set_allf(r, s->br_startoff, s->br_startblock,
 1723	 s->br_blockcount, s->br_state);
/* Replace only the low 21 bits of l1 (blockcount), preserving the rest. */
 1727	 * Set the blockcount field in a bmap extent record.
 1730	xfs_bmbt_set_blockcount(
 1731	xfs_bmbt_rec_host_t *r,
 1734	ASSERT((v & XFS_MASK64HI(43)) == 0);
 1735	r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(43)) |
 1736	 (xfs_bmbt_rec_base_t)(v & XFS_MASK64LO(21));
/*
 * xfs_bmbt_set_startblock -- overwrite only the startblock bits: the low
 * 9 bits of l0 and the high 43 bits of l1.  (Extract is non-contiguous;
 * the #if XFS_BIG_BLKNOS selector before the first arm is elided.)
 */
 1740	 * Set the startblock field in a bmap extent record.
 1743	xfs_bmbt_set_startblock(
 1744	xfs_bmbt_rec_host_t *r,
 1748	ASSERT((v & XFS_MASK64HI(12)) == 0);
 1749	r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(55)) |
 1750	 (xfs_bmbt_rec_base_t)(v >> 43);
 1751	r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)) |
 1752	 (xfs_bmbt_rec_base_t)(v << 21);
 1753	#else	/* !XFS_BIG_BLKNOS */
	/* NULL startblock: set the marker bits; otherwise clear them. */
 1754	if (ISNULLSTARTBLOCK(v)) {
 1755	r->l0 |= (xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
 1756	r->l1 = (xfs_bmbt_rec_base_t)XFS_MASK64HI(11) |
 1757	 ((xfs_bmbt_rec_base_t)v << 21) |
 1758	 (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
 1760	r->l0 &= ~(xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
 1761	r->l1 = ((xfs_bmbt_rec_base_t)v << 21) |
 1762	 (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
 1764	#endif	/* XFS_BIG_BLKNOS */
/*
 * Replace only the startoff bits of l0, keeping the top extent-flag bit
 * and the low 9 startblock bits intact.
 */
 1768	 * Set the startoff field in a bmap extent record.
 1771	xfs_bmbt_set_startoff(
 1772	xfs_bmbt_rec_host_t *r,
 1775	ASSERT((v & XFS_MASK64HI(9)) == 0);
 1776	r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) XFS_MASK64HI(1)) |
 1777	 ((xfs_bmbt_rec_base_t)v << 9) |
 1778	 (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(9));
/* Set or clear the top-of-l0 extent flag: NORM clears it, UNWRITTEN sets it. */
 1782	 * Set the extent state field in a bmap extent record.
 1786	xfs_bmbt_rec_host_t *r,
 1789	ASSERT(v == XFS_EXT_NORM || v == XFS_EXT_UNWRITTEN);
 1790	if (v == XFS_EXT_NORM)
 1791	r->l0 &= XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN);
 1793	r->l0 |= XFS_MASK64HI(BMBT_EXNTFLAG_BITLEN);
/*
 * xfs_bmbt_to_bmdr -- copy the in-memory root block (rblock) into the
 * on-disk dinode root format (dblock): header fields, then the key and
 * pointer arrays.  Inverse of the xfs_bmdr_to_bmbt copy above.
 */
 1797	 * Convert in-memory form of btree root to on-disk form.
 1801	xfs_bmbt_block_t	*rblock,
 1803	xfs_bmdr_block_t	*dblock,
 1807	xfs_bmbt_key_t	*fkp;
 1809	xfs_bmbt_key_t	*tkp;
 1812	ASSERT(be32_to_cpu(rblock->bb_magic) == XFS_BMAP_MAGIC);
	/* A root block never has siblings. */
 1813	ASSERT(be64_to_cpu(rblock->bb_leftsib) == NULLDFSBNO);
 1814	ASSERT(be64_to_cpu(rblock->bb_rightsib) == NULLDFSBNO);
 1815	ASSERT(be16_to_cpu(rblock->bb_level) > 0);
 1816	dblock->bb_level = rblock->bb_level;
 1817	dblock->bb_numrecs = rblock->bb_numrecs;
	/* dmxr first sizes the on-disk arrays, then becomes the copy count. */
 1818	dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0);
 1819	fkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen);
 1820	tkp = XFS_BTREE_KEY_ADDR(xfs_bmdr, dblock, 1);
 1821	fpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
 1822	tpp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dblock, 1, dmxr);
 1823	dmxr = be16_to_cpu(dblock->bb_numrecs);
 1824	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
 1825	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
/*
 * xfs_check_nostate_extents -- scan `num` extents starting at `idx` and
 * report (non-zero) if any record has its extent-state flag bit set.
 */
 1829	 * Check extent records, which have just been read, for
 1830	 * any bit in the extent flag field. ASSERT on debug
 1831	 * kernels, as this condition should not occur.
 1832	 * Return an error condition (1) if any flags found,
 1833	 * otherwise return 0.
 1837	xfs_check_nostate_extents(
 1842	for (; num > 0; num--, idx++) {
 1843	xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
	/* Test the top BMBT_EXNTFLAG_BITLEN bits of l0 (the state flag). */
 1845	 (64 - BMBT_EXNTFLAG_BITLEN)) != 0) {
/*
 * xfs_bmbt_dup_cursor -- btree-ops callback: clone a bmbt cursor.
 * init_cursor rebuilds the common state; the bmap-private firstblock,
 * flist and flags must be copied over by hand.
 */
 1854	STATIC struct xfs_btree_cur *
 1855	xfs_bmbt_dup_cursor(
 1856	struct xfs_btree_cur	*cur)
 1858	struct xfs_btree_cur	*new;
 1860	new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
 1861	 cur->bc_private.b.ip, cur->bc_private.b.whichfork);
 1864	 * Copy the firstblock, flist, and flags values,
 1865	 * since init cursor doesn't get them.
 1867	new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
 1868	new->bc_private.b.flist = cur->bc_private.b.flist;
 1869	new->bc_private.b.flags = cur->bc_private.b.flags;
/* btree-ops callback: max records per block for the given level. */
 1875	xfs_bmbt_get_maxrecs(
 1876	struct xfs_btree_cur	*cur,
 1879	return XFS_BMAP_BLOCK_IMAXRECS(level, cur);
/* btree-ops callback: a bmbt key is just the record's startoff, big-endian. */
 1883	xfs_bmbt_init_key_from_rec(
 1884	union xfs_btree_key	*key,
 1885	union xfs_btree_rec	*rec)
 1887	key->bmbt.br_startoff =
 1888	 cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt));
/*
 * btree-ops callback: initialize a root pointer.  Body elided in this
 * extract; upstream it zeroes ptr->l (the bmbt root lives in the inode).
 */
 1892	xfs_bmbt_init_ptr_from_cur(
 1893	struct xfs_btree_cur	*cur,
 1894	union xfs_btree_ptr	*ptr)
/*
 * btree-ops callback (xfs_bmbt_key_diff; declaration lines elided):
 * signed difference key.startoff - cursor-record.startoff, used by the
 * generic lookup to steer the binary search.
 */
 1901	struct xfs_btree_cur	*cur,
 1902	union xfs_btree_key	*key)
 1904	return (__int64_t)be64_to_cpu(key->bmbt.br_startoff) -
 1905	 cur->bc_rec.b.br_startoff;
 1908	#ifdef XFS_BTREE_TRACE
/* Global ktrace buffer shared by all bmbt trace entries. */
 1909	ktrace_t	*xfs_bmbt_trace_buf;
/*
 * xfs_bmbt_trace_enter -- btree-ops trace callback: record one trace
 * event, both in the global buffer and in the inode's private buffer.
 */
 1912	xfs_bmbt_trace_enter(
 1913	struct xfs_btree_cur	*cur,
 1930	struct xfs_inode	*ip = cur->bc_private.b.ip;
 1931	int	whichfork = cur->bc_private.b.whichfork;
	/* type/whichfork/line are packed into the first word of each entry. */
 1933	ktrace_enter(xfs_bmbt_trace_buf,
 1934	 (void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
 1935	 (void *)func, (void *)s, (void *)ip, (void *)cur,
 1936	 (void *)a0, (void *)a1, (void *)a2, (void *)a3,
 1937	 (void *)a4, (void *)a5, (void *)a6, (void *)a7,
 1938	 (void *)a8, (void *)a9, (void *)a10);
 1939	ktrace_enter(ip->i_btrace,
 1940	 (void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
 1941	 (void *)func, (void *)s, (void *)ip, (void *)cur,
 1942	 (void *)a0, (void *)a1, (void *)a2, (void *)a3,
 1943	 (void *)a4, (void *)a5, (void *)a6, (void *)a7,
 1944	 (void *)a8, (void *)a9, (void *)a10);
/*
 * xfs_bmbt_trace_cursor -- trace callback: condense cursor state into
 * scalar words (nlevels/flags/allocated packed into *s0; record words
 * presumably follow in lines elided from this extract).
 */
 1948	xfs_bmbt_trace_cursor(
 1949	struct xfs_btree_cur	*cur,
 1954	struct xfs_bmbt_rec_host r;
 1956	xfs_bmbt_set_all(&r, &cur->bc_rec.b);
 1958	*s0 = (cur->bc_nlevels << 24) |
 1959	 (cur->bc_private.b.flags << 16) |
 1960	 cur->bc_private.b.allocated;
/*
 * Trace callback (xfs_bmbt_trace_key; declaration lines elided): expose
 * the key's startoff as the first trace word.
 */
 1967	struct xfs_btree_cur	*cur,
 1968	union xfs_btree_key	*key,
 1972	*l0 = be64_to_cpu(key->bmbt.br_startoff);
/*
 * xfs_bmbt_trace_record -- trace callback: unpack a disk record and expose
 * startoff/startblock/blockcount as trace words.
 */
 1977	xfs_bmbt_trace_record(
 1978	struct xfs_btree_cur	*cur,
 1979	union xfs_btree_rec	*rec,
 1984	struct xfs_bmbt_irec	irec;
 1986	xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
 1987	*l0 = irec.br_startoff;
 1988	*l1 = irec.br_startblock;
 1989	*l2 = irec.br_blockcount;
/*
 * Generic-btree operations vector for the bmap btree; wired into each
 * cursor by xfs_bmbt_init_cursor.  Trace hooks only exist on
 * XFS_BTREE_TRACE builds.
 */
 1993	static const struct xfs_btree_ops xfs_bmbt_ops = {
 1994	.rec_len = sizeof(xfs_bmbt_rec_t),
 1995	.key_len = sizeof(xfs_bmbt_key_t),
 1997	.dup_cursor = xfs_bmbt_dup_cursor,
 1998	.get_maxrecs = xfs_bmbt_get_maxrecs,
 1999	.init_key_from_rec = xfs_bmbt_init_key_from_rec,
 2000	.init_ptr_from_cur = xfs_bmbt_init_ptr_from_cur,
 2001	.key_diff = xfs_bmbt_key_diff,
 2003	#ifdef XFS_BTREE_TRACE
 2004	.trace_enter = xfs_bmbt_trace_enter,
 2005	.trace_cursor = xfs_bmbt_trace_cursor,
 2006	.trace_key = xfs_bmbt_trace_key,
 2007	.trace_record = xfs_bmbt_trace_record,
2012 * Allocate a new bmap btree cursor.
2014 struct xfs_btree_cur * /* new bmap btree cursor */
2015 xfs_bmbt_init_cursor(
2016 struct xfs_mount *mp, /* file system mount point */
2017 struct xfs_trans *tp, /* transaction pointer */
2018 struct xfs_inode *ip, /* inode owning the btree */
2019 int whichfork) /* data or attr fork */
2021 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
2022 struct xfs_btree_cur *cur;
2024 cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);
2028 cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
2029 cur->bc_btnum = XFS_BTNUM_BMAP;
2030 cur->bc_blocklog = mp->m_sb.sb_blocklog;
2032 cur->bc_ops = &xfs_bmbt_ops;
2033 cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
2035 cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
2036 cur->bc_private.b.ip = ip;
2037 cur->bc_private.b.firstblock = NULLFSBLOCK;
2038 cur->bc_private.b.flist = NULL;
2039 cur->bc_private.b.allocated = 0;
2040 cur->bc_private.b.flags = 0;
2041 cur->bc_private.b.whichfork = whichfork;