2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include "xfs_types.h"
24 #include "xfs_trans.h"
28 #include "xfs_dmapi.h"
29 #include "xfs_mount.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_alloc_btree.h"
32 #include "xfs_ialloc_btree.h"
33 #include "xfs_dir2_sf.h"
34 #include "xfs_attr_sf.h"
35 #include "xfs_dinode.h"
36 #include "xfs_inode.h"
37 #include "xfs_inode_item.h"
38 #include "xfs_alloc.h"
39 #include "xfs_btree.h"
40 #include "xfs_btree_trace.h"
41 #include "xfs_ialloc.h"
42 #include "xfs_itable.h"
44 #include "xfs_error.h"
45 #include "xfs_quota.h"
/*
 * NOTE(review): this chunk is a sampled extract -- signatures, braces and
 * many statements are missing throughout.  Comments added below are hedged
 * accordingly; verify against the full xfs_bmap_btree.c.
 *
 * Fragment of xfs_extent_state(): the visible path asserts a nonzero block
 * count (kept "for DMIG" per the original comment) and reports the extent
 * as XFS_EXT_UNWRITTEN -- presumably the branch taken when the extent flag
 * is set; TODO confirm against the non-visible guard.
 */
49 * Determine the extent state.
58 ASSERT(blks != 0); /* saved for DMIG */
59 return XFS_EXT_UNWRITTEN;
/*
 * Fragment of xfs_bmdr_to_bmbt(): copy an on-disk bmap btree root
 * (xfs_bmdr_block_t, the inode-fork "dmapi root" format) into the
 * in-memory root format (xfs_bmbt_block_t).  Header fields are copied
 * or synthesized explicitly; sibling pointers are forced to NULLDFSBNO
 * since a root block has no siblings.  NOTE(review): the full parameter
 * list, braces and local declarations are not visible in this extract.
 */
65 * Convert on-disk form of btree root to in-memory form.
69 xfs_bmdr_block_t *dblock,
71 xfs_bmbt_block_t *rblock,
80 rblock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
81 rblock->bb_level = dblock->bb_level;
82 ASSERT(be16_to_cpu(rblock->bb_level) > 0);
83 rblock->bb_numrecs = dblock->bb_numrecs;
84 rblock->bb_leftsib = cpu_to_be64(NULLDFSBNO);
85 rblock->bb_rightsib = cpu_to_be64(NULLDFSBNO);
/*
 * dmxr first holds the source block's record capacity (needed to locate
 * the pointer array within the on-disk block) ...
 */
86 dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0);
87 fkp = XFS_BTREE_KEY_ADDR(xfs_bmdr, dblock, 1);
88 tkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen);
89 fpp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dblock, 1, dmxr);
90 tpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
/* ... then is reused as the actual number of records to copy. */
91 dmxr = be16_to_cpu(dblock->bb_numrecs);
92 memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
93 memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
/*
 * Fragment of __xfs_bmbt_get_all(): unpack the two 64-bit words (l0, l1)
 * of a compressed bmap extent record into an uncompressed irec.  Visible
 * bit layout: extent flag is the top bit of l0; startoff occupies l0
 * shifted down by 9 (below the flag); startblock is split across the low
 * 9 bits of l0 and the high bits of l1; blockcount is the low 21 bits of
 * l1.  The alternative startblock decodings below presumably correspond
 * to the XFS_BIG_BLKNOS / !XFS_BIG_BLKNOS branches whose #if lines are
 * not visible here -- TODO confirm.
 */
97 * Convert a compressed bmap extent record to an uncompressed form.
98 * This code must be in sync with the routines xfs_bmbt_get_startoff,
99 * xfs_bmbt_get_startblock, xfs_bmbt_get_blockcount and xfs_bmbt_get_state.
111 ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN));
112 s->br_startoff = ((xfs_fileoff_t)l0 &
113 XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
115 s->br_startblock = (((xfs_fsblock_t)l0 & XFS_MASK64LO(9)) << 43) |
116 (((xfs_fsblock_t)l1) >> 21);
/* Small-blkno variant: decode into a 64-bit dfsbno and range-check it. */
122 b = (((xfs_dfsbno_t)l0 & XFS_MASK64LO(9)) << 43) |
123 (((xfs_dfsbno_t)l1) >> 21);
124 ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
125 s->br_startblock = (xfs_fsblock_t)b;
128 s->br_startblock = (xfs_fsblock_t)(((xfs_dfsbno_t)l1) >> 21);
130 #endif /* XFS_BIG_BLKNOS */
131 s->br_blockcount = (xfs_filblks_t)(l1 & XFS_MASK64LO(21));
132 /* This is xfs_extent_state() in-line */
134 ASSERT(s->br_blockcount != 0); /* saved for DMIG */
135 st = XFS_EXT_UNWRITTEN;
/*
 * Fragment of xfs_bmbt_get_all(): thin wrapper that unpacks a host-endian
 * record via __xfs_bmbt_get_all().
 */
143 xfs_bmbt_rec_host_t *r,
146 __xfs_bmbt_get_all(r->l0, r->l1, s);
/*
 * Fragments of the in-memory (host-endian) extent record accessors.
 * Each extracts one field from the packed xfs_bmbt_rec_host_t words;
 * the bit layout matches __xfs_bmbt_get_all() above.  Return types and
 * braces are only partially visible in this extract.
 */
150 * Extract the blockcount field from an in memory bmap extent record.
153 xfs_bmbt_get_blockcount(
154 xfs_bmbt_rec_host_t *r)
/* blockcount: low 21 bits of l1 */
156 return (xfs_filblks_t)(r->l1 & XFS_MASK64LO(21));
160 * Extract the startblock field from an in memory bmap extent record.
163 xfs_bmbt_get_startblock(
164 xfs_bmbt_rec_host_t *r)
/* startblock: low 9 bits of l0 joined with the high 43 bits of l1 */
167 return (((xfs_fsblock_t)r->l0 & XFS_MASK64LO(9)) << 43) |
168 (((xfs_fsblock_t)r->l1) >> 21);
/* presumably the !XFS_BIG_BLKNOS branch -- #else line not visible */
173 b = (((xfs_dfsbno_t)r->l0 & XFS_MASK64LO(9)) << 43) |
174 (((xfs_dfsbno_t)r->l1) >> 21);
175 ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
176 return (xfs_fsblock_t)b;
178 return (xfs_fsblock_t)(((xfs_dfsbno_t)r->l1) >> 21);
180 #endif /* XFS_BIG_BLKNOS */
184 * Extract the startoff field from an in memory bmap extent record.
187 xfs_bmbt_get_startoff(
188 xfs_bmbt_rec_host_t *r)
/* startoff: l0 with the extent-flag bit masked off, shifted past startblock's high bits */
190 return ((xfs_fileoff_t)r->l0 &
191 XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
/* Fragment of xfs_bmbt_get_state(): flag bit + blockcount -> extent state */
196 xfs_bmbt_rec_host_t *r)
200 ext_flag = (int)((r->l0) >> (64 - BMBT_EXNTFLAG_BITLEN));
201 return xfs_extent_state(xfs_bmbt_get_blockcount(r),
/*
 * On-disk (big-endian) counterparts of the accessors above: identical
 * field extraction, but each packed word goes through be64_to_cpu()
 * first.  Signatures are only partially visible in this extract.
 */
205 /* Endian flipping versions of the bmbt extraction functions */
207 xfs_bmbt_disk_get_all(
211 __xfs_bmbt_get_all(be64_to_cpu(r->l0), be64_to_cpu(r->l1), s);
215 * Extract the blockcount field from an on disk bmap extent record.
218 xfs_bmbt_disk_get_blockcount(
221 return (xfs_filblks_t)(be64_to_cpu(r->l1) & XFS_MASK64LO(21));
225 * Extract the startoff field from a disk format bmap extent record.
228 xfs_bmbt_disk_get_startoff(
231 return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
232 XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
/*
 * Fragment of xfs_bmbt_set_allf(): pack the four extent fields into the
 * two host-endian 64-bit words of an xfs_bmbt_rec_host_t.  The ASSERTs
 * verify each input fits its bit field.  The XFS_BIG_BLKNOS branch packs
 * the full 52-bit startblock; the !XFS_BIG_BLKNOS branch special-cases
 * delayed-alloc records (ISNULLSTARTBLOCK) by writing the all-ones
 * startblock pattern across the split field.  NOTE(review): the #if,
 * else and closing-brace lines are not visible here.
 */
237 * Set all the fields in a bmap extent record from the arguments.
241 xfs_bmbt_rec_host_t *r,
242 xfs_fileoff_t startoff,
243 xfs_fsblock_t startblock,
244 xfs_filblks_t blockcount,
247 int extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;
249 ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
250 ASSERT((startoff & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0);
251 ASSERT((blockcount & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
254 ASSERT((startblock & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0);
256 r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
257 ((xfs_bmbt_rec_base_t)startoff << 9) |
258 ((xfs_bmbt_rec_base_t)startblock >> 43);
259 r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
260 ((xfs_bmbt_rec_base_t)blockcount &
261 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
262 #else /* !XFS_BIG_BLKNOS */
263 if (ISNULLSTARTBLOCK(startblock)) {
264 r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
265 ((xfs_bmbt_rec_base_t)startoff << 9) |
266 (xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
267 r->l1 = XFS_MASK64HI(11) |
268 ((xfs_bmbt_rec_base_t)startblock << 21) |
269 ((xfs_bmbt_rec_base_t)blockcount &
270 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
272 r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
273 ((xfs_bmbt_rec_base_t)startoff << 9);
274 r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
275 ((xfs_bmbt_rec_base_t)blockcount &
276 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
278 #endif /* XFS_BIG_BLKNOS */
/*
 * Fragment of xfs_bmbt_set_all(): convenience wrapper packing from an
 * uncompressed xfs_bmbt_irec.
 */
282 * Set all the fields in a bmap extent record from the uncompressed form.
286 xfs_bmbt_rec_host_t *r,
289 xfs_bmbt_set_allf(r, s->br_startoff, s->br_startblock,
290 s->br_blockcount, s->br_state);
/*
 * Fragment of xfs_bmbt_disk_set_allf(): identical packing logic to
 * xfs_bmbt_set_allf(), but each assembled word is byte-swapped to disk
 * (big-endian) order via cpu_to_be64().  Several "r->l0 = cpu_to_be64("
 * lead-in lines appear to be missing from this extract -- the orphaned
 * expression lines below are their continuations; verify against the
 * full source before editing.
 */
295 * Set all the fields in a disk format bmap extent record from the arguments.
298 xfs_bmbt_disk_set_allf(
300 xfs_fileoff_t startoff,
301 xfs_fsblock_t startblock,
302 xfs_filblks_t blockcount,
305 int extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;
307 ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
308 ASSERT((startoff & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0);
309 ASSERT((blockcount & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
312 ASSERT((startblock & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0);
315 ((xfs_bmbt_rec_base_t)extent_flag << 63) |
316 ((xfs_bmbt_rec_base_t)startoff << 9) |
317 ((xfs_bmbt_rec_base_t)startblock >> 43));
319 ((xfs_bmbt_rec_base_t)startblock << 21) |
320 ((xfs_bmbt_rec_base_t)blockcount &
321 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
322 #else /* !XFS_BIG_BLKNOS */
323 if (ISNULLSTARTBLOCK(startblock)) {
325 ((xfs_bmbt_rec_base_t)extent_flag << 63) |
326 ((xfs_bmbt_rec_base_t)startoff << 9) |
327 (xfs_bmbt_rec_base_t)XFS_MASK64LO(9));
328 r->l1 = cpu_to_be64(XFS_MASK64HI(11) |
329 ((xfs_bmbt_rec_base_t)startblock << 21) |
330 ((xfs_bmbt_rec_base_t)blockcount &
331 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
334 ((xfs_bmbt_rec_base_t)extent_flag << 63) |
335 ((xfs_bmbt_rec_base_t)startoff << 9));
337 ((xfs_bmbt_rec_base_t)startblock << 21) |
338 ((xfs_bmbt_rec_base_t)blockcount &
339 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
341 #endif /* XFS_BIG_BLKNOS */
/*
 * Fragment of xfs_bmbt_disk_set_all(): wrapper packing an uncompressed
 * irec into disk format.
 */
345 * Set all the fields in a bmap extent record from the uncompressed form.
348 xfs_bmbt_disk_set_all(
352 xfs_bmbt_disk_set_allf(r, s->br_startoff, s->br_startblock,
353 s->br_blockcount, s->br_state);
/*
 * Fragments of the single-field setters for a host-endian extent record.
 * Each updates one packed field via mask-and-merge, leaving the other
 * fields' bits untouched.  Signatures/braces are only partially visible.
 */
357 * Set the blockcount field in a bmap extent record.
360 xfs_bmbt_set_blockcount(
361 xfs_bmbt_rec_host_t *r,
/* keep l1's high 43 bits (startblock), replace the low 21 (blockcount) */
364 ASSERT((v & XFS_MASK64HI(43)) == 0);
365 r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(43)) |
366 (xfs_bmbt_rec_base_t)(v & XFS_MASK64LO(21));
370 * Set the startblock field in a bmap extent record.
373 xfs_bmbt_set_startblock(
374 xfs_bmbt_rec_host_t *r,
/* big-blkno path: split v across l0's low 9 bits and l1's high 43 bits */
378 ASSERT((v & XFS_MASK64HI(12)) == 0);
379 r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(55)) |
380 (xfs_bmbt_rec_base_t)(v >> 43);
381 r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)) |
382 (xfs_bmbt_rec_base_t)(v << 21);
383 #else /* !XFS_BIG_BLKNOS */
384 if (ISNULLSTARTBLOCK(v)) {
/* delayed-alloc: saturate the split field's high bits */
385 r->l0 |= (xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
386 r->l1 = (xfs_bmbt_rec_base_t)XFS_MASK64HI(11) |
387 ((xfs_bmbt_rec_base_t)v << 21) |
388 (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
390 r->l0 &= ~(xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
391 r->l1 = ((xfs_bmbt_rec_base_t)v << 21) |
392 (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
394 #endif /* XFS_BIG_BLKNOS */
398 * Set the startoff field in a bmap extent record.
401 xfs_bmbt_set_startoff(
402 xfs_bmbt_rec_host_t *r,
/* preserve the flag bit (top) and startblock's high 9 bits (bottom) of l0 */
405 ASSERT((v & XFS_MASK64HI(9)) == 0);
406 r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) XFS_MASK64HI(1)) |
407 ((xfs_bmbt_rec_base_t)v << 9) |
408 (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(9));
412 * Set the extent state field in a bmap extent record.
416 xfs_bmbt_rec_host_t *r,
/* state is the single top bit of l0: clear for NORM, set for UNWRITTEN */
419 ASSERT(v == XFS_EXT_NORM || v == XFS_EXT_UNWRITTEN);
420 if (v == XFS_EXT_NORM)
421 r->l0 &= XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN);
423 r->l0 |= XFS_MASK64HI(BMBT_EXNTFLAG_BITLEN);
/*
 * Fragment of xfs_bmbt_to_bmdr(): the inverse of xfs_bmdr_to_bmbt() --
 * copy the in-memory bmap btree root back into the compact on-disk
 * (inode fork) format.  The ASSERTs verify the in-memory root is
 * well-formed (magic, no siblings, nonzero level) before copying the
 * header and the key/pointer arrays.  As above, dmxr is the destination
 * capacity for addressing, then the actual record count for the copies.
 * NOTE(review): signature and declarations are not visible here.
 */
427 * Convert in-memory form of btree root to on-disk form.
431 xfs_bmbt_block_t *rblock,
433 xfs_bmdr_block_t *dblock,
442 ASSERT(be32_to_cpu(rblock->bb_magic) == XFS_BMAP_MAGIC);
443 ASSERT(be64_to_cpu(rblock->bb_leftsib) == NULLDFSBNO);
444 ASSERT(be64_to_cpu(rblock->bb_rightsib) == NULLDFSBNO);
445 ASSERT(be16_to_cpu(rblock->bb_level) > 0);
446 dblock->bb_level = rblock->bb_level;
447 dblock->bb_numrecs = rblock->bb_numrecs;
448 dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0);
449 fkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen);
450 tkp = XFS_BTREE_KEY_ADDR(xfs_bmdr, dblock, 1);
451 fpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
452 tpp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dblock, 1, dmxr);
453 dmxr = be16_to_cpu(dblock->bb_numrecs);
454 memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
455 memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
/*
 * Fragment of xfs_check_nostate_extents(): walk `num` in-core extent
 * records starting at `idx` and test each record's extent-flag bit.
 * Per the original comment, any set flag is an error (1); clean records
 * return 0.  The test expression and return statements are mostly
 * missing from this extract.
 */
459 * Check extent records, which have just been read, for
460 * any bit in the extent flag field. ASSERT on debug
461 * kernels, as this condition should not occur.
462 * Return an error condition (1) if any flags found,
463 * otherwise return 0.
467 xfs_check_nostate_extents(
472 for (; num > 0; num--, idx++) {
473 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
/* continuation of a truncated flag test: (ep->l0 >> ...) presumably */
475 (64 - BMBT_EXNTFLAG_BITLEN)) != 0) {
/*
 * Fragment of xfs_bmbt_dup_cursor(): btree-ops callback that clones a
 * bmap btree cursor.  A fresh cursor is created for the same inode/fork,
 * then the bmap-private fields that init_cursor does not set are copied
 * over.  The return statement is not visible in this extract.
 */
484 STATIC struct xfs_btree_cur *
486 struct xfs_btree_cur *cur)
488 struct xfs_btree_cur *new;
490 new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
491 cur->bc_private.b.ip, cur->bc_private.b.whichfork);
494 * Copy the firstblock, flist, and flags values,
495 * since init cursor doesn't get them.
497 new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
498 new->bc_private.b.flist = cur->bc_private.b.flist;
499 new->bc_private.b.flags = cur->bc_private.b.flags;
/*
 * Fragment of xfs_bmbt_update_cursor(): after a split/allocation on a
 * duplicate cursor, fold its allocation accounting back into the
 * destination cursor and zero the source's count.
 */
505 xfs_bmbt_update_cursor(
506 struct xfs_btree_cur *src,
507 struct xfs_btree_cur *dst)
509 ASSERT((dst->bc_private.b.firstblock != NULLFSBLOCK) ||
510 (dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));
511 ASSERT(dst->bc_private.b.flist == src->bc_private.b.flist);
513 dst->bc_private.b.allocated += src->bc_private.b.allocated;
514 dst->bc_private.b.firstblock = src->bc_private.b.firstblock;
516 src->bc_private.b.allocated = 0;
/*
 * Fragment of xfs_bmbt_alloc_block(): btree-ops callback that allocates
 * one filesystem block for a new bmap btree block.  Allocation policy
 * visible here: first attempt near firstblock (or START_BNO with a
 * minleft reservation on the first allocation of the transaction); on an
 * ENOSPC-like miss with minleft set, retry from the first AG and switch
 * the free list to low-space mode.  On success the cursor, inode block
 * count and quota are updated.  NOTE(review): several if/else, goto and
 * return lines are missing from this extract -- error paths (e.g. the
 * XBT_ERROR label) are only partially visible.
 */
520 xfs_bmbt_alloc_block(
521 struct xfs_btree_cur *cur,
522 union xfs_btree_ptr *start,
523 union xfs_btree_ptr *new,
527 xfs_alloc_arg_t args; /* block allocation args */
528 int error; /* error return value */
530 memset(&args, 0, sizeof(args));
531 args.tp = cur->bc_tp;
532 args.mp = cur->bc_mp;
533 args.fsbno = cur->bc_private.b.firstblock;
534 args.firstblock = args.fsbno;
536 if (args.fsbno == NULLFSBLOCK) {
537 args.fsbno = be64_to_cpu(start->l);
538 args.type = XFS_ALLOCTYPE_START_BNO;
540 * Make sure there is sufficient room left in the AG to
541 * complete a full tree split for an extent insert. If
542 * we are converting the middle part of an extent then
543 * we may need space for two tree splits.
545 * We are relying on the caller to make the correct block
546 * reservation for this operation to succeed. If the
547 * reservation amount is insufficient then we may fail a
548 * block allocation here and corrupt the filesystem.
550 args.minleft = xfs_trans_get_block_res(args.tp);
551 } else if (cur->bc_private.b.flist->xbf_low) {
552 args.type = XFS_ALLOCTYPE_START_BNO;
554 args.type = XFS_ALLOCTYPE_NEAR_BNO;
557 args.minlen = args.maxlen = args.prod = 1;
558 args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
/* a delayed-alloc conversion must already hold a block reservation */
559 if (!args.wasdel && xfs_trans_get_block_res(args.tp) == 0) {
560 error = XFS_ERROR(ENOSPC);
563 error = xfs_alloc_vextent(&args);
567 if (args.fsbno == NULLFSBLOCK && args.minleft) {
569 * Could not find an AG with enough free space to satisfy
570 * a full btree split. Try again without minleft and if
571 * successful activate the lowspace algorithm.
574 args.type = XFS_ALLOCTYPE_FIRST_AG;
576 error = xfs_alloc_vextent(&args);
579 cur->bc_private.b.flist->xbf_low = 1;
581 if (args.fsbno == NULLFSBLOCK) {
582 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
/* success path: account the single new block against inode and quota */
586 ASSERT(args.len == 1);
587 cur->bc_private.b.firstblock = args.fsbno;
588 cur->bc_private.b.allocated++;
589 cur->bc_private.b.ip->i_d.di_nblocks++;
590 xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
591 XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip,
592 XFS_TRANS_DQ_BCOUNT, 1L);
594 new->l = cpu_to_be64(args.fsbno);
596 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
601 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
/*
 * Fragment of xfs_bmbt_free_block(): btree-ops callback that releases a
 * bmap btree block.  The block is queued on the transaction's free list,
 * the inode block count and quota are decremented, and the buffer is
 * invalidated.  NOTE(review): the function-name line and return are not
 * visible in this extract.
 */
607 struct xfs_btree_cur *cur,
610 struct xfs_mount *mp = cur->bc_mp;
611 struct xfs_inode *ip = cur->bc_private.b.ip;
612 struct xfs_trans *tp = cur->bc_tp;
613 xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
615 xfs_bmap_add_free(fsbno, 1, cur->bc_private.b.flist, mp);
616 ip->i_d.di_nblocks--;
618 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
619 XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
620 xfs_trans_binval(tp, bp);
/*
 * Fragments of the record-count callbacks: min/max records per level for
 * the in-memory form, and the on-disk (dinode-fork-sized) maximum.  Each
 * simply delegates to the corresponding XFS_BMAP_BLOCK_* macro.
 */
625 xfs_bmbt_get_minrecs(
626 struct xfs_btree_cur *cur,
629 return XFS_BMAP_BLOCK_IMINRECS(level, cur);
633 xfs_bmbt_get_maxrecs(
634 struct xfs_btree_cur *cur,
637 return XFS_BMAP_BLOCK_IMAXRECS(level, cur);
641 * Get the maximum records we could store in the on-disk format.
643 * For non-root nodes this is equivalent to xfs_bmbt_get_maxrecs, but
644 * for the root node this checks the available space in the dinode fork
645 * so that we can resize the in-memory buffer to match it. After a
646 * resize to the maximum size this function returns the same value
647 * as xfs_bmbt_get_maxrecs for the root node, too.
650 xfs_bmbt_get_dmaxrecs(
651 struct xfs_btree_cur *cur,
654 return XFS_BMAP_BLOCK_DMAXRECS(level, cur);
/*
 * Fragments of the generic-btree conversion and ordering callbacks.
 * Keys for the bmap btree are just the big-endian startoff of a record.
 */
658 xfs_bmbt_init_key_from_rec(
659 union xfs_btree_key *key,
660 union xfs_btree_rec *rec)
662 key->bmbt.br_startoff =
663 cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt));
/* Build a record from a key: only startoff is meaningful; the remaining
 * arguments are presumably zeros on the (missing) continuation line. */
667 xfs_bmbt_init_rec_from_key(
668 union xfs_btree_key *key,
669 union xfs_btree_rec *rec)
671 ASSERT(key->bmbt.br_startoff != 0);
673 xfs_bmbt_disk_set_allf(&rec->bmbt, be64_to_cpu(key->bmbt.br_startoff),
/* Pack the cursor's in-core irec into an on-disk record. */
678 xfs_bmbt_init_rec_from_cur(
679 struct xfs_btree_cur *cur,
680 union xfs_btree_rec *rec)
682 xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
/* Root-in-inode: the body of init_ptr_from_cur is not visible here. */
686 xfs_bmbt_init_ptr_from_cur(
687 struct xfs_btree_cur *cur,
688 union xfs_btree_ptr *ptr)
/* key_diff: signed distance from the cursor's lookup startoff to a key */
695 struct xfs_btree_cur *cur,
696 union xfs_btree_key *key)
698 return (__int64_t)be64_to_cpu(key->bmbt.br_startoff) -
699 cur->bc_rec.b.br_startoff;
/* Debug ordering checks: keys strictly ascending by startoff ... */
704 xfs_bmbt_keys_inorder(
705 struct xfs_btree_cur *cur,
706 union xfs_btree_key *k1,
707 union xfs_btree_key *k2)
709 return be64_to_cpu(k1->bmbt.br_startoff) <
710 be64_to_cpu(k2->bmbt.br_startoff);
/* ... records non-overlapping: r1 must end at or before r2 starts. */
714 xfs_bmbt_recs_inorder(
715 struct xfs_btree_cur *cur,
716 union xfs_btree_rec *r1,
717 union xfs_btree_rec *r2)
719 return xfs_bmbt_disk_get_startoff(&r1->bmbt) +
720 xfs_bmbt_disk_get_blockcount(&r1->bmbt) <=
721 xfs_bmbt_disk_get_startoff(&r2->bmbt);
/*
 * XFS_BTREE_TRACE-only helpers (fragments): record btree events in both
 * the global bmbt ktrace buffer and the per-inode trace buffer, and pack
 * cursor/key/record state into the scalar slots the trace core expects.
 * NOTE(review): parameter lists and several statements are missing from
 * this extract.
 */
725 #ifdef XFS_BTREE_TRACE
726 ktrace_t *xfs_bmbt_trace_buf;
729 xfs_bmbt_trace_enter(
730 struct xfs_btree_cur *cur,
747 struct xfs_inode *ip = cur->bc_private.b.ip;
748 int whichfork = cur->bc_private.b.whichfork;
/* type/fork/line are packed into one word; same entry goes to both buffers */
750 ktrace_enter(xfs_bmbt_trace_buf,
751 (void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
752 (void *)func, (void *)s, (void *)ip, (void *)cur,
753 (void *)a0, (void *)a1, (void *)a2, (void *)a3,
754 (void *)a4, (void *)a5, (void *)a6, (void *)a7,
755 (void *)a8, (void *)a9, (void *)a10);
756 ktrace_enter(ip->i_btrace,
757 (void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
758 (void *)func, (void *)s, (void *)ip, (void *)cur,
759 (void *)a0, (void *)a1, (void *)a2, (void *)a3,
760 (void *)a4, (void *)a5, (void *)a6, (void *)a7,
761 (void *)a8, (void *)a9, (void *)a10);
/* Pack cursor state (levels, flags, allocated) into trace scalars. */
765 xfs_bmbt_trace_cursor(
766 struct xfs_btree_cur *cur,
771 struct xfs_bmbt_rec_host r;
773 xfs_bmbt_set_all(&r, &cur->bc_rec.b);
775 *s0 = (cur->bc_nlevels << 24) |
776 (cur->bc_private.b.flags << 16) |
777 cur->bc_private.b.allocated;
/* Fragment of xfs_bmbt_trace_key(): key -> startoff scalar. */
784 struct xfs_btree_cur *cur,
785 union xfs_btree_key *key,
789 *l0 = be64_to_cpu(key->bmbt.br_startoff);
/* Unpack a record into three scalars for tracing. */
794 xfs_bmbt_trace_record(
795 struct xfs_btree_cur *cur,
796 union xfs_btree_rec *rec,
801 struct xfs_bmbt_irec irec;
803 xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
804 *l0 = irec.br_startoff;
805 *l1 = irec.br_startblock;
806 *l2 = irec.br_blockcount;
808 #endif /* XFS_BTREE_TRACE */
/*
 * Fragment of the bmap btree operations table wired into the generic
 * btree code: record/key sizes plus the callback set defined above.
 * NOTE(review): a few member lines and the closing brace are missing
 * from this extract.
 */
810 static const struct xfs_btree_ops xfs_bmbt_ops = {
811 .rec_len = sizeof(xfs_bmbt_rec_t),
812 .key_len = sizeof(xfs_bmbt_key_t),
814 .dup_cursor = xfs_bmbt_dup_cursor,
815 .update_cursor = xfs_bmbt_update_cursor,
816 .alloc_block = xfs_bmbt_alloc_block,
817 .free_block = xfs_bmbt_free_block,
818 .get_maxrecs = xfs_bmbt_get_maxrecs,
819 .get_minrecs = xfs_bmbt_get_minrecs,
820 .get_dmaxrecs = xfs_bmbt_get_dmaxrecs,
821 .init_key_from_rec = xfs_bmbt_init_key_from_rec,
822 .init_rec_from_key = xfs_bmbt_init_rec_from_key,
823 .init_rec_from_cur = xfs_bmbt_init_rec_from_cur,
824 .init_ptr_from_cur = xfs_bmbt_init_ptr_from_cur,
825 .key_diff = xfs_bmbt_key_diff,
828 .keys_inorder = xfs_bmbt_keys_inorder,
829 .recs_inorder = xfs_bmbt_recs_inorder,
832 #ifdef XFS_BTREE_TRACE
833 .trace_enter = xfs_bmbt_trace_enter,
834 .trace_cursor = xfs_bmbt_trace_cursor,
835 .trace_key = xfs_bmbt_trace_key,
836 .trace_record = xfs_bmbt_trace_record,
841 * Allocate a new bmap btree cursor.
843 struct xfs_btree_cur * /* new bmap btree cursor */
844 xfs_bmbt_init_cursor(
845 struct xfs_mount *mp, /* file system mount point */
846 struct xfs_trans *tp, /* transaction pointer */
847 struct xfs_inode *ip, /* inode owning the btree */
848 int whichfork) /* data or attr fork */
850 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
851 struct xfs_btree_cur *cur;
853 cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);
857 cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
858 cur->bc_btnum = XFS_BTNUM_BMAP;
859 cur->bc_blocklog = mp->m_sb.sb_blocklog;
861 cur->bc_ops = &xfs_bmbt_ops;
862 cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
864 cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
865 cur->bc_private.b.ip = ip;
866 cur->bc_private.b.firstblock = NULLFSBLOCK;
867 cur->bc_private.b.flist = NULL;
868 cur->bc_private.b.allocated = 0;
869 cur->bc_private.b.flags = 0;
870 cur->bc_private.b.whichfork = whichfork;