2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include "xfs_shared.h"
21 #include "xfs_format.h"
22 #include "xfs_log_format.h"
23 #include "xfs_trans_resv.h"
24 #include "xfs_mount.h"
25 #include "xfs_inode.h"
26 #include "xfs_error.h"
27 #include "xfs_cksum.h"
28 #include "xfs_icache.h"
29 #include "xfs_trans.h"
30 #include "xfs_ialloc.h"
33 * Check that none of the inodes in the buffer have a next
34 * unlinked field of 0.
/* inodes per cluster buffer: cluster size divided by inode size (log2) */
46 j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
48 for (i = 0; i < j; i++) {
49 dip = xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize);
/* a zero di_next_unlinked is never valid on disk; warn about it */
50 if (!dip->di_next_unlinked) {
52 "Detected bogus zero next_unlinked field in inode %d buffer 0x%llx.",
53 i, (long long)bp->b_bn);
60 * If we are doing readahead on an inode buffer, we might be in log recovery
61 * reading an inode allocation buffer that hasn't yet been replayed, and hence
62 * has not had the inode cores stamped into it. Hence for readahead, the buffer
63 * may be potentially invalid.
65 * If the readahead buffer is invalid, we need to mark it with an error and
66 * clear the DONE status of the buffer so that a followup read will re-read it
67 * from disk. We don't report the error otherwise to avoid warnings during log
68 recovery and we don't get unnecessary panics on debug kernels. We use EIO here
69 * because all we want to do is say readahead failed; there is no-one to report
70 * the error to, so this will distinguish it from a non-ra verifier failure.
77 struct xfs_mount *mp = bp->b_target->bt_mount;
82 * Validate the magic number and version of every inode in the buffer
/* number of inodes in this buffer = blocks covered * inodes per block */
84 ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
85 for (i = 0; i < ni; i++) {
89 dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
90 di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
91 XFS_DINODE_GOOD_VERSION(dip->di_version);
/* XFS_TEST_ERROR also allows error-injection to force this path */
92 if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
93 XFS_ERRTAG_ITOBP_INOTOBP,
94 XFS_RANDOM_ITOBP_INOTOBP))) {
/*
 * Readahead path: mark the buffer not-done with -EIO so a
 * followup real read re-reads it from disk; no error report
 * (see the header comment above this function).
 */
96 bp->b_flags &= ~XBF_DONE;
97 xfs_buf_ioerror(bp, -EIO);
/* normal read/write path: report corruption loudly */
101 xfs_buf_ioerror(bp, -EFSCORRUPTED);
102 xfs_verifier_error(bp);
105 "bad inode magic/vsn daddr %lld #%d (magic=%x)",
106 (unsigned long long)bp->b_bn, i,
107 be16_to_cpu(dip->di_magic));
/* debug-only sanity check of the unlinked fields */
111 xfs_inobp_check(mp, bp);
/* ->verify_read for a normal (non-readahead) inode buffer read */
116 xfs_inode_buf_read_verify(
119 xfs_inode_buf_verify(bp, false);
/*
 * ->verify_read for readahead: passes readahead=true so verification
 * failures are marked on the buffer but not reported (see comment above
 * xfs_inode_buf_verify).
 */
123 xfs_inode_buf_readahead_verify(
126 xfs_inode_buf_verify(bp, true);
/* ->verify_write: same full verification as a normal read */
130 xfs_inode_buf_write_verify(
133 xfs_inode_buf_verify(bp, false);
/*
 * Buffer ops for regular inode buffer reads/writes.
 * NOTE(review): the .name initializer appears to be missing from this
 * chunk — confirm against the full source.
 */
136 const struct xfs_buf_ops xfs_inode_buf_ops = {
138 .verify_read = xfs_inode_buf_read_verify,
139 .verify_write = xfs_inode_buf_write_verify,
/*
 * Buffer ops for readahead of inode buffers: verification failures are
 * marked on the buffer with -EIO but not reported, so log recovery does
 * not warn about not-yet-replayed inode allocation buffers.
 */
142 const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
143 .name = "xfs_inode_ra",
144 .verify_read = xfs_inode_buf_readahead_verify,
145 .verify_write = xfs_inode_buf_write_verify,
150 * This routine is called to map an inode to the buffer containing the on-disk
151 * version of the inode. It returns a pointer to the buffer containing the
152 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
153 * pointer to the on-disk inode within that buffer.
155 * If a non-zero error is returned, then the contents of bpp and dipp are
160 struct xfs_mount *mp,
161 struct xfs_trans *tp,
162 struct xfs_imap *imap,
163 struct xfs_dinode **dipp,
164 struct xfs_buf **bpp,
/* inode buffers are accessed by offset; no kernel mapping needed */
171 buf_flags |= XBF_UNMAPPED;
172 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
173 (int)imap->im_len, buf_flags, &bp,
/* -EAGAIN can only happen when the caller asked for a trylock */
176 if (error == -EAGAIN) {
177 ASSERT(buf_flags & XBF_TRYLOCK);
/*
 * Untrusted lookups can legitimately hit corruption (bad user-supplied
 * inode number) — presumably suppress the warning in that case; verify
 * against the elided lines of the full source.
 */
181 if (error == -EFSCORRUPTED &&
182 (iget_flags & XFS_IGET_UNTRUSTED))
185 xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.",
/* point the caller at the on-disk inode within the buffer */
191 *dipp = xfs_buf_offset(bp, imap->im_boffset);
/*
 * Copy an on-disk inode core (big-endian) into the in-core (CPU-endian)
 * icdinode, field by field.
 */
196 xfs_dinode_from_disk(
200 to->di_magic = be16_to_cpu(from->di_magic);
201 to->di_mode = be16_to_cpu(from->di_mode);
/* NOTE(review): style nit — "from ->di_version" has a stray space */
202 to->di_version = from ->di_version;
203 to->di_format = from->di_format;
204 to->di_onlink = be16_to_cpu(from->di_onlink);
205 to->di_uid = be32_to_cpu(from->di_uid);
206 to->di_gid = be32_to_cpu(from->di_gid);
207 to->di_nlink = be32_to_cpu(from->di_nlink);
208 to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
209 to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
210 memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
211 to->di_flushiter = be16_to_cpu(from->di_flushiter);
212 to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
213 to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
214 to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
215 to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
216 to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
217 to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
218 to->di_size = be64_to_cpu(from->di_size);
219 to->di_nblocks = be64_to_cpu(from->di_nblocks);
220 to->di_extsize = be32_to_cpu(from->di_extsize);
221 to->di_nextents = be32_to_cpu(from->di_nextents);
222 to->di_anextents = be16_to_cpu(from->di_anextents);
223 to->di_forkoff = from->di_forkoff;
224 to->di_aformat = from->di_aformat;
225 to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
226 to->di_dmstate = be16_to_cpu(from->di_dmstate);
227 to->di_flags = be16_to_cpu(from->di_flags);
228 to->di_gen = be32_to_cpu(from->di_gen);
/* version-3 (CRC-enabled) inodes carry extra fields */
230 if (to->di_version == 3) {
231 to->di_changecount = be64_to_cpu(from->di_changecount);
232 to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
233 to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
234 to->di_flags2 = be64_to_cpu(from->di_flags2);
235 to->di_ino = be64_to_cpu(from->di_ino);
236 to->di_lsn = be64_to_cpu(from->di_lsn);
237 memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
238 uuid_copy(&to->di_uuid, &from->di_uuid);
/*
 * Inverse of xfs_dinode_from_disk: copy the in-core icdinode back into
 * the on-disk (big-endian) inode core.
 */
245 xfs_icdinode_t *from)
247 to->di_magic = cpu_to_be16(from->di_magic);
248 to->di_mode = cpu_to_be16(from->di_mode);
/* NOTE(review): style nit — "from ->di_version" has a stray space */
249 to->di_version = from ->di_version;
250 to->di_format = from->di_format;
251 to->di_onlink = cpu_to_be16(from->di_onlink);
252 to->di_uid = cpu_to_be32(from->di_uid);
253 to->di_gid = cpu_to_be32(from->di_gid);
254 to->di_nlink = cpu_to_be32(from->di_nlink);
255 to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
256 to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
257 memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
258 to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
259 to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
260 to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
261 to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
262 to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
263 to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
264 to->di_size = cpu_to_be64(from->di_size);
265 to->di_nblocks = cpu_to_be64(from->di_nblocks);
266 to->di_extsize = cpu_to_be32(from->di_extsize);
267 to->di_nextents = cpu_to_be32(from->di_nextents);
268 to->di_anextents = cpu_to_be16(from->di_anextents);
269 to->di_forkoff = from->di_forkoff;
270 to->di_aformat = from->di_aformat;
271 to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
272 to->di_dmstate = cpu_to_be16(from->di_dmstate);
273 to->di_flags = cpu_to_be16(from->di_flags);
274 to->di_gen = cpu_to_be32(from->di_gen);
276 if (from->di_version == 3) {
277 to->di_changecount = cpu_to_be64(from->di_changecount);
278 to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
279 to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
280 to->di_flags2 = cpu_to_be64(from->di_flags2);
281 to->di_ino = cpu_to_be64(from->di_ino);
282 to->di_lsn = cpu_to_be64(from->di_lsn);
283 memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
284 uuid_copy(&to->di_uuid, &from->di_uuid);
/* v3 inodes never use di_flushiter on disk; older versions keep it */
285 to->di_flushiter = 0;
287 to->di_flushiter = cpu_to_be16(from->di_flushiter);
293 struct xfs_mount *mp,
294 struct xfs_inode *ip,
295 struct xfs_dinode *dip)
/* every inode must carry the dinode magic, regardless of version */
297 if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
300 /* only version 3 or greater inodes are extensively verified here */
301 if (dip->di_version < 3)
304 if (!xfs_sb_version_hascrc(&mp->m_sb))
/* CRC of the whole on-disk inode must match the stored di_crc */
306 if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
/* self-describing metadata: inode number and fs UUID must match */
309 if (be64_to_cpu(dip->di_ino) != ip->i_ino)
311 if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
/*
 * Compute and store the CRC for a version-3 (CRC-enabled) on-disk
 * inode; a no-op for older inode versions.
 */
318 struct xfs_mount *mp,
319 struct xfs_dinode *dip)
323 if (dip->di_version < 3)
/* v3 inodes only exist on CRC-enabled (v5 superblock) filesystems */
326 ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
327 crc = xfs_start_cksum((char *)dip, mp->m_sb.sb_inodesize,
329 dip->di_crc = xfs_end_cksum(crc);
333 * Read the disk inode attributes into the in-core inode structure.
335 * For version 5 superblocks, if we are initialising a new inode and we are not
336 * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simply build the new
337 * inode core with a random generation number. If we are keeping inodes around,
338 * we need to read the inode cluster to get the existing generation number off
339 * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
340 * format) then log recovery is dependent on the di_flushiter field being
341 * initialised from the current on-disk value and hence we must also read the
356 * Fill in the location information in the in-core inode.
358 error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
362 /* shortcut IO on inode allocation if possible */
363 if ((iget_flags & XFS_IGET_CREATE) &&
364 xfs_sb_version_hascrc(&mp->m_sb) &&
365 !(mp->m_flags & XFS_MOUNT_IKEEP)) {
366 /* initialise the on-disk inode core */
367 memset(&ip->i_d, 0, sizeof(ip->i_d));
368 ip->i_d.di_magic = XFS_DINODE_MAGIC;
/* fresh inode: any random generation number will do */
369 ip->i_d.di_gen = prandom_u32();
370 if (xfs_sb_version_hascrc(&mp->m_sb)) {
371 ip->i_d.di_version = 3;
372 ip->i_d.di_ino = ip->i_ino;
373 uuid_copy(&ip->i_d.di_uuid, &mp->m_sb.sb_meta_uuid);
375 ip->i_d.di_version = 2;
380 * Get pointers to the on-disk inode and the buffer containing it.
382 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
386 /* even unallocated inodes are verified */
387 if (!xfs_dinode_verify(mp, ip, dip)) {
/*
 * NOTE(review): message reads "validation failed for inode %lld
 * failed" — doubled "failed"; fixing it is a string change, flagged
 * here rather than silently altered.
 */
388 xfs_alert(mp, "%s: validation failed for inode %lld failed",
389 __func__, ip->i_ino);
391 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, dip);
392 error = -EFSCORRUPTED;
397 * If the on-disk inode is already linked to a directory
398 * entry, copy all of the inode into the in-core inode.
399 * xfs_iformat_fork() handles copying in the inode format
400 * specific information.
401 * Otherwise, just get the truly permanent information.
404 xfs_dinode_from_disk(&ip->i_d, dip);
405 error = xfs_iformat_fork(ip, dip);
408 xfs_alert(mp, "%s: xfs_iformat() returned error %d",
415 * Partial initialisation of the in-core inode. Just the bits
416 * that xfs_ialloc won't overwrite or relies on being correct.
418 ip->i_d.di_magic = be16_to_cpu(dip->di_magic);
419 ip->i_d.di_version = dip->di_version;
420 ip->i_d.di_gen = be32_to_cpu(dip->di_gen);
421 ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
423 if (dip->di_version == 3) {
424 ip->i_d.di_ino = be64_to_cpu(dip->di_ino);
425 uuid_copy(&ip->i_d.di_uuid, &dip->di_uuid);
429 * Make sure to pull in the mode here as well in
430 * case the inode is released without being used.
431 * This ensures that xfs_inactive() will see that
432 * the inode is already free and not try to mess
433 * with the uninitialized part of it.
439 * Automatically convert version 1 inode formats in memory to version 2
440 * inode format. If the inode is modified, it will get logged and
441 * rewritten as a version 2 inode. We can do this because we set the
442 * superblock feature bit for v2 inodes unconditionally during mount
443 * and it means the rest of the code can assume the inode version is 2
446 if (ip->i_d.di_version == 1) {
447 ip->i_d.di_version = 2;
448 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
/* v1 kept the link count in di_onlink; migrate it to di_nlink */
449 ip->i_d.di_nlink = ip->i_d.di_onlink;
450 ip->i_d.di_onlink = 0;
451 xfs_set_projid(ip, 0);
454 ip->i_delayed_blks = 0;
457 * Mark the buffer containing the inode as something to keep
458 * around for a while. This helps to keep recently accessed
459 * meta-data in-core longer.
461 xfs_buf_set_ref(bp, XFS_INO_REF);
464 * Use xfs_trans_brelse() to release the buffer containing the on-disk
465 * inode, because it was acquired with xfs_trans_read_buf() in
466 * xfs_imap_to_bp() above. If tp is NULL, this is just a normal
467 * brelse(). If we're within a transaction, then xfs_trans_brelse()
468 * will only release the buffer if it is not dirty within the
469 * transaction. It will be OK to release the buffer in this case,
470 * because inodes on disk are never destroyed and we will be locking the
471 * new in-core inode before putting it in the cache where other
472 * processes can find it. Thus we don't have to worry about the inode
473 * being changed just because we released the buffer.
476 xfs_trans_brelse(tp, bp);