/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_btree_trace.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_fsops.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_vnodeops.h"
#include "xfs_log_priv.h"
#include "xfs_trans_priv.h"
#include "xfs_filestream.h"
#include "xfs_da_btree.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"

#include <linux/namei.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/parser.h>
static const struct super_operations xfs_super_operations;
static kmem_zone_t *xfs_ioend_zone;
mempool_t *xfs_ioend_pool;
#define MNTOPT_LOGBUFS	"logbufs"	/* number of XFS log buffers */
#define MNTOPT_LOGBSIZE	"logbsize"	/* size of XFS log buffers */
#define MNTOPT_LOGDEV	"logdev"	/* log device */
#define MNTOPT_RTDEV	"rtdev"		/* realtime I/O device */
#define MNTOPT_BIOSIZE	"biosize"	/* log2 of preferred buffered io size */
#define MNTOPT_WSYNC	"wsync"		/* safe-mode nfs compatible mount */
#define MNTOPT_NOALIGN	"noalign"	/* turn off stripe alignment */
#define MNTOPT_SWALLOC	"swalloc"	/* turn on stripe width allocation */
#define MNTOPT_SUNIT	"sunit"		/* data volume stripe unit */
#define MNTOPT_SWIDTH	"swidth"	/* data volume stripe width */
#define MNTOPT_NOUUID	"nouuid"	/* ignore filesystem UUID */
#define MNTOPT_MTPT	"mtpt"		/* filesystem mount point */
#define MNTOPT_GRPID	"grpid"		/* group-ID from parent directory */
#define MNTOPT_NOGRPID	"nogrpid"	/* group-ID from current process */
#define MNTOPT_BSDGROUPS    "bsdgroups"	/* group-ID from parent directory */
#define MNTOPT_SYSVGROUPS   "sysvgroups" /* group-ID from current process */
#define MNTOPT_ALLOCSIZE    "allocsize"	/* preferred allocation size */
#define MNTOPT_NORECOVERY   "norecovery" /* don't run XFS recovery */
#define MNTOPT_BARRIER	"barrier"	/* use writer barriers for log write and
					 * unwritten extent conversion */
#define MNTOPT_NOBARRIER "nobarrier"	/* .. disable */
#define MNTOPT_64BITINODE   "inode64"	/* inodes can be allocated anywhere */
#define MNTOPT_IKEEP	"ikeep"		/* do not free empty inode clusters */
#define MNTOPT_NOIKEEP	"noikeep"	/* free empty inode clusters */
#define MNTOPT_LARGEIO	   "largeio"	/* report large I/O sizes in stat() */
#define MNTOPT_NOLARGEIO   "nolargeio"	/* do not report large I/O sizes
					 * in stat() */
#define MNTOPT_ATTR2	"attr2"		/* do use attr2 attribute format */
#define MNTOPT_NOATTR2	"noattr2"	/* do not use attr2 attribute format */
#define MNTOPT_FILESTREAM  "filestreams" /* use filestreams allocator */
#define MNTOPT_QUOTA	"quota"		/* disk quotas (user) */
#define MNTOPT_NOQUOTA	"noquota"	/* no quotas */
#define MNTOPT_USRQUOTA	"usrquota"	/* user quota enabled */
#define MNTOPT_GRPQUOTA	"grpquota"	/* group quota enabled */
#define MNTOPT_PRJQUOTA	"prjquota"	/* project quota enabled */
#define MNTOPT_UQUOTA	"uquota"	/* user quota (IRIX variant) */
#define MNTOPT_GQUOTA	"gquota"	/* group quota (IRIX variant) */
#define MNTOPT_PQUOTA	"pquota"	/* project quota (IRIX variant) */
#define MNTOPT_UQUOTANOENF "uqnoenforce" /* user quota limit enforcement */
#define MNTOPT_GQUOTANOENF "gqnoenforce" /* group quota limit enforcement */
#define MNTOPT_PQUOTANOENF "pqnoenforce" /* project quota limit enforcement */
#define MNTOPT_QUOTANOENF  "qnoenforce"	/* same as uqnoenforce */
#define MNTOPT_DELAYLOG    "delaylog"	/* Delayed logging enabled */
#define MNTOPT_NODELAYLOG  "nodelaylog"	/* Delayed logging disabled */
#define MNTOPT_DISCARD	   "discard"	/* Discard unused blocks */
#define MNTOPT_NODISCARD   "nodiscard"	/* Do not discard unused blocks */
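/*
 * Illustrative example (not part of the original source): a mount command
 * using several of the option names defined above might look like
 *
 *	mount -t xfs -o logdev=/dev/sdb1,logbsize=64k,sunit=64,swidth=256,grpid /dev/sda1 /mnt
 *
 * The device paths and values here are hypothetical; they only show how the
 * MNTOPT_* strings appear in an option list passed to xfs_parseargs().
 */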
/*
 * Table driven mount option parser.
 *
 * Currently only used for remount, but it will be used for mount
 * in the future, too.
 */
enum {
	Opt_barrier, Opt_nobarrier, Opt_err
};

static const match_table_t tokens = {
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_err, NULL}
};
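#if 0	/* illustrative only -- not part of the original source */
/*
 * Minimal sketch of how the token table above is consumed with
 * match_token() from <linux/parser.h>.  The function name is hypothetical;
 * xfs_fs_remount() below performs the real parsing.
 */
static int xfs_parse_one_remount_opt(char *p)
{
	substring_t	args[MAX_OPT_ARGS];

	switch (match_token(p, tokens, args)) {
	case Opt_barrier:
		return 1;	/* enable write barriers */
	case Opt_nobarrier:
		return 0;	/* disable write barriers */
	default:
		return -1;	/* unknown strings map to Opt_err */
	}
}
#endif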
STATIC unsigned long
suffix_strtoul(char *s, char **endp, unsigned int base)
{
	int	last, shift_left_factor = 0;
	char	*value = s;
	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}
	return simple_strtoul((const char *)s, endp, base) << shift_left_factor;
}
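/*
 * Example: suffix_strtoul("64k", &endp, 10) parses 64 and shifts it left by
 * 10 bits, returning 65536; "8m" gives 8 << 20.  Plain numbers such as "64"
 * pass through unshifted.
 */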
/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock has _not_ yet been read in.
 *
 * Note that this function leaks the various device name allocations on
 * failure.  The caller takes care of them.
 */
	struct xfs_mount	*mp,
	struct super_block	*sb = mp->m_super;
	char			*this_char, *value, *eov;
	__uint8_t		iosizelog = 0;

	/*
	 * set up the mount name first so all the errors will refer to the
	 * correct device.
	 */
	mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
	mp->m_fsname_len = strlen(mp->m_fsname) + 1;

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (sb->s_flags & MS_RDONLY)
		mp->m_flags |= XFS_MOUNT_RDONLY;
	if (sb->s_flags & MS_DIRSYNC)
		mp->m_flags |= XFS_MOUNT_DIRSYNC;
	if (sb->s_flags & MS_SYNCHRONOUS)
		mp->m_flags |= XFS_MOUNT_WSYNC;

	/*
	 * Set some default flags that could be cleared by the mount option
	 * parsing.
	 */
	mp->m_flags |= XFS_MOUNT_BARRIER;
	mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
	mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
	mp->m_flags |= XFS_MOUNT_DELAYLOG;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	while ((this_char = strsep(&options, ",")) != NULL) {
		if ((value = strchr(this_char, '=')) != NULL)

		if (!strcmp(this_char, MNTOPT_LOGBUFS)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
			mp->m_logbufs = simple_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
			mp->m_logbsize = suffix_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_LOGDEV)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
			mp->m_logname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
		} else if (!strcmp(this_char, MNTOPT_MTPT)) {
			xfs_warn(mp, "%s option not allowed on this system",
		} else if (!strcmp(this_char, MNTOPT_RTDEV)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
			mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
		} else if (!strcmp(this_char, MNTOPT_BIOSIZE)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
			iosize = simple_strtoul(value, &eov, 10);
			iosizelog = ffs(iosize) - 1;
		} else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
			iosize = suffix_strtoul(value, &eov, 10);
			iosizelog = ffs(iosize) - 1;
		} else if (!strcmp(this_char, MNTOPT_GRPID) ||
			   !strcmp(this_char, MNTOPT_BSDGROUPS)) {
			mp->m_flags |= XFS_MOUNT_GRPID;
		} else if (!strcmp(this_char, MNTOPT_NOGRPID) ||
			   !strcmp(this_char, MNTOPT_SYSVGROUPS)) {
			mp->m_flags &= ~XFS_MOUNT_GRPID;
		} else if (!strcmp(this_char, MNTOPT_WSYNC)) {
			mp->m_flags |= XFS_MOUNT_WSYNC;
		} else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
			mp->m_flags |= XFS_MOUNT_NORECOVERY;
		} else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
			mp->m_flags |= XFS_MOUNT_NOALIGN;
		} else if (!strcmp(this_char, MNTOPT_SWALLOC)) {
			mp->m_flags |= XFS_MOUNT_SWALLOC;
		} else if (!strcmp(this_char, MNTOPT_SUNIT)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
			dsunit = simple_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_SWIDTH)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
			dswidth = simple_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_64BITINODE)) {
			mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
			xfs_warn(mp, "%s option not allowed on this system",
		} else if (!strcmp(this_char, MNTOPT_NOUUID)) {
			mp->m_flags |= XFS_MOUNT_NOUUID;
		} else if (!strcmp(this_char, MNTOPT_BARRIER)) {
			mp->m_flags |= XFS_MOUNT_BARRIER;
		} else if (!strcmp(this_char, MNTOPT_NOBARRIER)) {
			mp->m_flags &= ~XFS_MOUNT_BARRIER;
		} else if (!strcmp(this_char, MNTOPT_IKEEP)) {
			mp->m_flags |= XFS_MOUNT_IKEEP;
		} else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
			mp->m_flags &= ~XFS_MOUNT_IKEEP;
		} else if (!strcmp(this_char, MNTOPT_LARGEIO)) {
			mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
		} else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) {
			mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
		} else if (!strcmp(this_char, MNTOPT_ATTR2)) {
			mp->m_flags |= XFS_MOUNT_ATTR2;
		} else if (!strcmp(this_char, MNTOPT_NOATTR2)) {
			mp->m_flags &= ~XFS_MOUNT_ATTR2;
			mp->m_flags |= XFS_MOUNT_NOATTR2;
		} else if (!strcmp(this_char, MNTOPT_FILESTREAM)) {
			mp->m_flags |= XFS_MOUNT_FILESTREAMS;
		} else if (!strcmp(this_char, MNTOPT_NOQUOTA)) {
			mp->m_qflags &= ~(XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
					  XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
					  XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
					  XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD);
		} else if (!strcmp(this_char, MNTOPT_QUOTA) ||
			   !strcmp(this_char, MNTOPT_UQUOTA) ||
			   !strcmp(this_char, MNTOPT_USRQUOTA)) {
			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
		} else if (!strcmp(this_char, MNTOPT_QUOTANOENF) ||
			   !strcmp(this_char, MNTOPT_UQUOTANOENF)) {
			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_UQUOTA_ENFD;
		} else if (!strcmp(this_char, MNTOPT_PQUOTA) ||
			   !strcmp(this_char, MNTOPT_PRJQUOTA)) {
			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
		} else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) {
			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_OQUOTA_ENFD;
		} else if (!strcmp(this_char, MNTOPT_GQUOTA) ||
			   !strcmp(this_char, MNTOPT_GRPQUOTA)) {
			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
		} else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_OQUOTA_ENFD;
		} else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
			mp->m_flags |= XFS_MOUNT_DELAYLOG;
		} else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
			mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
		} else if (!strcmp(this_char, MNTOPT_DISCARD)) {
			mp->m_flags |= XFS_MOUNT_DISCARD;
		} else if (!strcmp(this_char, MNTOPT_NODISCARD)) {
			mp->m_flags &= ~XFS_MOUNT_DISCARD;
		} else if (!strcmp(this_char, "ihashsize")) {
				"ihashsize no longer used, option is deprecated.");
		} else if (!strcmp(this_char, "osyncisdsync")) {
				"osyncisdsync has no effect, option is deprecated.");
		} else if (!strcmp(this_char, "osyncisosync")) {
				"osyncisosync has no effect, option is deprecated.");
		} else if (!strcmp(this_char, "irixsgid")) {
			"irixsgid is now a sysctl(2) variable, option is deprecated.");
			xfs_warn(mp, "unknown mount option [%s].", this_char);
	/*
	 * no recovery flag requires a read-only mount
	 */
	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");

	if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
	"sunit and swidth options incompatible with the noalign option");

	if ((mp->m_flags & XFS_MOUNT_DISCARD) &&
	    !(mp->m_flags & XFS_MOUNT_DELAYLOG)) {
	"the discard option is incompatible with the nodelaylog option");

#ifndef CONFIG_XFS_QUOTA
	if (XFS_IS_QUOTA_RUNNING(mp)) {
		xfs_warn(mp, "quota support not available in this kernel.");
#endif

	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) {
		xfs_warn(mp, "cannot mount with both project and group quota");

	if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");

	if (dsunit && (dswidth % dsunit != 0)) {
	"stripe width (%d) must be a multiple of the stripe unit (%d)",

	if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) {
		/*
		 * At this point the superblock has not been read
		 * in, therefore we do not know the block size.
		 * Before the mount call ends we will convert
		 * these to FSBs.
		 */
		mp->m_dalign = dsunit;
		mp->m_flags |= XFS_MOUNT_RETERR;

		mp->m_swidth = dswidth;

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return XFS_ERROR(EINVAL);

	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize != 0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
		return XFS_ERROR(EINVAL);

	if (iosizelog > XFS_MAX_IO_LOG ||
	    iosizelog < XFS_MIN_IO_LOG) {
		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
			iosizelog, XFS_MIN_IO_LOG,
		return XFS_ERROR(EINVAL);

		mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
		mp->m_readio_log = iosizelog;
		mp->m_writeio_log = iosizelog;
struct proc_xfs_info {
	int	flag;
	char	*str;
};

	struct xfs_mount	*mp,
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_IKEEP,		"," MNTOPT_IKEEP },
		{ XFS_MOUNT_WSYNC,		"," MNTOPT_WSYNC },
		{ XFS_MOUNT_NOALIGN,		"," MNTOPT_NOALIGN },
		{ XFS_MOUNT_SWALLOC,		"," MNTOPT_SWALLOC },
		{ XFS_MOUNT_NOUUID,		"," MNTOPT_NOUUID },
		{ XFS_MOUNT_NORECOVERY,		"," MNTOPT_NORECOVERY },
		{ XFS_MOUNT_ATTR2,		"," MNTOPT_ATTR2 },
		{ XFS_MOUNT_FILESTREAMS,	"," MNTOPT_FILESTREAM },
		{ XFS_MOUNT_GRPID,		"," MNTOPT_GRPID },
		{ XFS_MOUNT_DELAYLOG,		"," MNTOPT_DELAYLOG },
		{ XFS_MOUNT_DISCARD,		"," MNTOPT_DISCARD },
		{ 0, NULL }
	};
	static struct proc_xfs_info xfs_info_unset[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_COMPAT_IOSIZE,	"," MNTOPT_LARGEIO },
		{ XFS_MOUNT_BARRIER,		"," MNTOPT_NOBARRIER },
		{ XFS_MOUNT_SMALL_INUMS,	"," MNTOPT_64BITINODE },
		{ 0, NULL }
	};
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_flags & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
		if (!(mp->m_flags & xfs_infop->flag))
			seq_puts(m, xfs_infop->str);

	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
		seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk",
				(int)(1 << mp->m_writeio_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);

		seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname);
		seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, "," MNTOPT_SUNIT "=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, "," MNTOPT_SWIDTH "=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
		seq_puts(m, "," MNTOPT_USRQUOTA);
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, "," MNTOPT_UQUOTANOENF);

	/* Either project or group quotas can be active, not both */

	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
		if (mp->m_qflags & XFS_OQUOTA_ENFD)
			seq_puts(m, "," MNTOPT_PRJQUOTA);
			seq_puts(m, "," MNTOPT_PQUOTANOENF);
	} else if (mp->m_qflags & XFS_GQUOTA_ACCT) {
		if (mp->m_qflags & XFS_OQUOTA_ENFD)
			seq_puts(m, "," MNTOPT_GRPQUOTA);
			seq_puts(m, "," MNTOPT_GQUOTANOENF);

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, "," MNTOPT_NOQUOTA);
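/*
 * Illustrative output (not from the original source): with default mount
 * flags the code above typically appends something like
 *
 *	",attr2,delaylog,noquota"
 *
 * to the generic VFS options shown in /proc/mounts.  "attr2" appears when
 * the on-disk superblock has the attr2 feature, and values such as logbufs=
 * or logbsize= are only printed when they were explicitly set.
 */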
	unsigned int		blockshift)
	unsigned int		pagefactor = 1;
	unsigned int		bitshift = BITS_PER_LONG - 1;

	/* Figure out maximum filesize, on Linux this can depend on
	 * the filesystem blocksize (on 32 bit platforms).
	 * __block_write_begin does this in an [unsigned] long...
	 *      page->index << (PAGE_CACHE_SHIFT - bbits)
	 * So, for page sized blocks (4K on 32 bit platforms),
	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
	 *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
	 * but for smaller blocksizes it is less (bbits = log2 bsize).
	 * Note1: get_block_t takes a long (implicit cast from above)
	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
	 * can optionally convert the [unsigned] long from above into
	 * an [unsigned] long long.
	 */

#if BITS_PER_LONG == 32
# if defined(CONFIG_LBDAF)
	ASSERT(sizeof(sector_t) == 8);
	pagefactor = PAGE_CACHE_SIZE;
	bitshift = BITS_PER_LONG;
# else
	pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
# endif
#endif

	return (((__uint64_t)pagefactor) << bitshift) - 1;
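/*
 * Worked example (illustrative): on a 32-bit kernel with CONFIG_LBDAF and
 * 4096-byte pages, pagefactor = 4096 and bitshift = 32, so the limit is
 * (4096 << 32) - 1 = 16 TiB - 1.  Without CONFIG_LBDAF and with 4096-byte
 * blocks, bitshift stays at 31, giving (4096 << 31) - 1 = 8 TiB - 1, which
 * matches the "wraps at around 8Tb" note above.
 */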
	struct block_device	**bdevp)
	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d\n", name, error);

	struct block_device	*bdev)
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

xfs_blkdev_issue_flush(
	xfs_buftarg_t		*buftarg)
	blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL);

	struct xfs_mount	*mp)
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
		xfs_free_buftarg(mp, mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
		xfs_free_buftarg(mp, mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
	xfs_free_buftarg(mp, mp->m_ddev_targp);
/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
	struct xfs_mount	*mp)
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct block_device	*logdev = NULL, *rtdev = NULL;

	/*
	 * Open real time and log devices - order is important.
	 */
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);

		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			goto out_close_rtdev;

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, 0, mp->m_fsname);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, 1,
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, 1,
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
		mp->m_logdev_targp = mp->m_ddev_targp;

	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp, mp->m_rtdev_targp);
	xfs_free_buftarg(mp, mp->m_ddev_targp);
	xfs_blkdev_put(rtdev);
	if (logdev && logdev != ddev)
		xfs_blkdev_put(logdev);
/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
	struct xfs_mount	*mp)
	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
				    mp->m_sb.sb_sectsize);

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_sb_version_hassector(&mp->m_sb))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    mp->m_sb.sb_blocksize,
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_blocksize,
					    mp->m_sb.sb_sectsize);
/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
	struct super_block	*sb)

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can reclaim the inode.
 */
xfs_fs_destroy_inode(
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	XFS_STATS_INC(vn_reclaim);

	/* bad inode, get out here ASAP */
	if (is_bad_inode(inode))

	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);

	/*
	 * We should never get here with one of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));

	/*
	 * We always use background reclaim here because even if the
	 * inode is clean, it still may be under IO and hence we have
	 * to take the flush lock.  The background reclaim path handles
	 * this more efficiently than we can here, so simply let background
	 * reclaim tear down all inodes.
	 */
	xfs_inode_set_reclaim_tag(ip);

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab.  This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
xfs_fs_inode_init_once(
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	inode_init_once(VFS_I(ip));

	atomic_set(&ip->i_iocount, 0);
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);
	init_waitqueue_head(&ip->i_ipin_wait);
	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&ip->i_flush);
	complete(&ip->i_flush);

	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
/*
 * Dirty the XFS inode when mark_inode_dirty_sync() is called so that
 * we catch unlogged VFS level updates to the inode.
 *
 * We need the barrier() to maintain correct ordering between unlogged
 * updates and the transaction commit code that clears the i_update_core
 * field.  This requires all updates to be completed before marking the
 * inode dirty.
 */
	XFS_I(inode)->i_update_core = 1;

	struct xfs_inode	*ip)
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;

	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
		xfs_trans_cancel(tp, 0);
		/* we need to return with the lock held shared */
		xfs_ilock(ip, XFS_ILOCK_SHARED);

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	/*
	 * Note - it's possible that we might have pushed ourselves out of the
	 * way during trans_reserve which would flush the inode.  But there's
	 * no guarantee that the inode buffer has actually gone out yet (it's
	 * delwri).  Plus the buffer could be pinned anyway if it's part of
	 * an inode in another recent transaction.  So we play it safe and
	 * fire off the transaction anyway.
	 */
	xfs_trans_ijoin(tp, ip);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_trans_commit(tp, 0);
	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
	struct writeback_control *wbc)
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	trace_xfs_write_inode(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		/*
		 * Make sure the inode has made it into the log.  Instead
		 * of forcing it all the way to stable storage using a
		 * synchronous transaction we let the log force inside the
		 * ->sync_fs call do that for us, which reduces the number
		 * of synchronous log forces dramatically.
		 */
		xfs_ilock(ip, XFS_ILOCK_SHARED);
		if (ip->i_update_core) {
			error = xfs_log_inode(ip);

		/*
		 * We make this non-blocking if the inode is contended, return
		 * EAGAIN to indicate to the caller that they did not succeed.
		 * This prevents the flush path from blocking on inodes inside
		 * another operation right now, they get caught later by
		 * xfs_sync.
		 */
		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))

		if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))

		/*
		 * Now we have the flush lock and the inode is not pinned, we
		 * can check if the inode is really clean as we know that
		 * there are no pending transaction completions, it is not
		 * waiting on the delayed write queue and there is no IO in
		 * progress.
		 */
		if (xfs_inode_clean(ip)) {

		error = xfs_iflush(ip, SYNC_TRYLOCK);

	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * if we failed to write out the inode then mark
	 * it dirty again so we'll try again later.
	 */
		xfs_mark_inode_dirty_sync(ip);
	xfs_inode_t		*ip = XFS_I(inode);

	trace_xfs_evict_inode(ip);

	truncate_inode_pages(&inode->i_data, 0);
	end_writeback(inode);
	XFS_STATS_INC(vn_rele);
	XFS_STATS_INC(vn_remove);
	XFS_STATS_DEC(vn_active);

	/*
	 * The iolock is used by the file system to coordinate reads,
	 * writes, and block truncates.  Up to this point the lock
	 * protected concurrent accesses by users of the inode.  But
	 * from here forward we're doing some final processing of the
	 * inode because we're done with it, and although we reuse the
	 * iolock for protection it is really a distinct lock class
	 * (in the lockdep sense) from before.  To keep lockdep happy
	 * (and basically indicate what we are doing), we explicitly
	 * re-init the iolock here.
	 */
	ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
			&xfs_iolock_reclaimable, "xfs_iolock_reclaimable");
	struct xfs_mount	*mp)
	kfree(mp->m_fsname);
	kfree(mp->m_rtname);
	kfree(mp->m_logname);

	struct super_block	*sb)
	struct xfs_mount	*mp = XFS_M(sb);

	/*
	 * Blow away any referenced inode in the filestreams cache.
	 * This can and will cause log traffic as inodes go inactive.
	 */
	xfs_filestream_unmount(mp);

	XFS_bflush(mp->m_ddev_targp);

	xfs_icsb_destroy_counters(mp);
	xfs_close_devices(mp);
	xfs_free_fsname(mp);

	struct super_block	*sb,
	struct xfs_mount	*mp = XFS_M(sb);

	/*
	 * Not much we can do for the first async pass.  Writing out the
	 * superblock would be counter-productive as we are going to redirty
	 * it when writing out other data and metadata (and writing out a
	 * single block is quite fast anyway).
	 *
	 * Try to asynchronously kick off quota syncing at least.
	 */
		xfs_qm_sync(mp, SYNC_TRYLOCK);

	error = xfs_quiesce_data(mp);

		/*
		 * The disk must be active because we're syncing.
		 * We schedule xfssyncd now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work_sync(&mp->m_sync_work);
	struct dentry		*dentry,
	struct kstatfs		*statp)
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(dentry->d_inode);
	__uint64_t		fakeinos, id;

	statp->f_type = XFS_SB_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid.val[0] = (u32)id;
	statp->f_fsid.val[1] = (u32)(id >> 32);

	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	statp->f_bfree = statp->f_bavail =
			sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
	fakeinos = statp->f_bfree << sbp->sb_inopblog;
		MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
	if (mp->m_maxicount)
		statp->f_files = min_t(typeof(statp->f_files),

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
	statp->f_ffree = max_t(__int64_t, ffree, 0);
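	/*
	 * Illustrative arithmetic: with 4096-byte blocks and 256-byte inodes,
	 * sb_inopblog is 4, so every free block could hold up to 16 more
	 * inodes; "fakeinos" above is that optimistic upper bound, which is
	 * then clamped by XFS_MAXINUMBER and, if set, mp->m_maxicount.
	 */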
	spin_unlock(&mp->m_sb_lock);

	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) ||
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);

xfs_save_resvblks(struct xfs_mount *mp)
	__uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);

xfs_restore_resvblks(struct xfs_mount *mp)
	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
	struct super_block	*sb,
	struct xfs_mount	*mp = XFS_M(sb);
	substring_t		args[MAX_OPT_ARGS];

	while ((p = strsep(&options, ",")) != NULL) {
		token = match_token(p, tokens, args);
			mp->m_flags |= XFS_MOUNT_BARRIER;
			mp->m_flags &= ~XFS_MOUNT_BARRIER;
			/*
			 * Logically we would return an error here to prevent
			 * users from believing they might have changed
			 * mount options using remount when those options
			 * cannot actually be changed.
			 *
			 * But unfortunately mount(8) adds all options from
			 * mtab and fstab to the mount arguments in some cases
			 * so we can't blindly reject options, but have to
			 * check for each specified option if it actually
			 * differs from the currently set option and only
			 * reject it if that's the case.
			 *
			 * Until that is implemented we return success for
			 * every remount request, and silently ignore all
			 * options that we can't actually change.
			 */
	"mount option \"%s\" not supported for remount\n", p);

	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
		mp->m_flags &= ~XFS_MOUNT_RDONLY;

		/*
		 * If this is the first remount to writeable state we
		 * might have some superblock changes to update.
		 */
		if (mp->m_update_flags) {
			error = xfs_mount_log_sb(mp, mp->m_update_flags);
				xfs_warn(mp, "failed to write sb changes");
			mp->m_update_flags = 0;

		/*
		 * Fill out the reserve pool if it is empty.  Use the stashed
		 * value if it is non-zero, otherwise go with the default.
		 */
		xfs_restore_resvblks(mp);

	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
		/*
		 * After we have synced the data but before we sync the
		 * metadata, we need to free up the reserve block pool so that
		 * the used block count in the superblock on disk is correct at
		 * the end of the remount.  Stash the current reserve pool size
		 * so that if we get remounted rw, we can return it to the same
		 * size.
		 */
		xfs_quiesce_data(mp);
		xfs_save_resvblks(mp);
		xfs_quiesce_attr(mp);
		mp->m_flags |= XFS_MOUNT_RDONLY;
/*
 * Second stage of a freeze.  The data is already frozen so we only
 * need to take care of the metadata.  Once that's done write a dummy
 * record to dirty the log in case of a crash while frozen.
 */
	struct super_block	*sb)
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_save_resvblks(mp);
	xfs_quiesce_attr(mp);
	return -xfs_fs_log_dummy(mp);

	struct super_block	*sb)
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);

xfs_fs_show_options(
	struct vfsmount		*mnt)
	return -xfs_showargs(XFS_M(mnt->mnt_sb), m);
/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
	struct xfs_mount	*mp)
	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);

	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
	"logbuf size must be greater than or equal to log stripe size");
			return XFS_ERROR(EINVAL);

		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
	"logbuf size for version 1 logs must be 16K or 32K");
			return XFS_ERROR(EINVAL);

	/*
	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
	 * told by noattr2 to turn it off
	 */
	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
		mp->m_flags |= XFS_MOUNT_ATTR2;

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
	"cannot mount a read-only filesystem as read-write");
		return XFS_ERROR(EROFS);
	struct super_block	*sb,
	struct xfs_mount	*mp = NULL;
	int			flags = 0, error = ENOMEM;

	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);

	spin_lock_init(&mp->m_sb_lock);
	mutex_init(&mp->m_growlock);
	atomic_set(&mp->m_active_trans, 0);

	error = xfs_parseargs(mp, (char *)data);
		goto out_free_fsname;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
#endif
	sb->s_op = &xfs_super_operations;

		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
		goto out_free_fsname;

	error = xfs_icsb_init_counters(mp);
		goto out_close_devices;

	error = xfs_readsb(mp, flags);
		goto out_destroy_counters;

	error = xfs_finish_flags(mp);

	error = xfs_setup_devices(mp);

	error = xfs_filestream_mount(mp);

	/*
	 * we must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 * For the same reason we must also initialise the syncd and register
	 * the inode cache shrinker so that inodes can be reclaimed during
	 * operations like a quotacheck that iterate all inodes in the
	 * filesystem.
	 */
	sb->s_magic = XFS_SB_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
	sb->s_time_gran = 1;
	set_posix_acl_flag(sb);

	error = xfs_syncd_init(mp);
		goto out_filestream_unmount;

	error = xfs_mountfs(mp);
		goto out_syncd_stop;

	root = igrab(VFS_I(mp->m_rootip));
	if (is_bad_inode(root)) {
	sb->s_root = d_alloc_root(root);

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_destroy_counters:
	xfs_icsb_destroy_counters(mp);
	xfs_close_devices(mp);
	xfs_free_fsname(mp);

	/*
	 * Blow away any referenced inode in the filestreams cache.
	 * This can and will cause log traffic as inodes go inactive.
	 */
	xfs_filestream_unmount(mp);

	XFS_bflush(mp->m_ddev_targp);
STATIC struct dentry *
	struct file_system_type	*fs_type,
	const char		*dev_name,
	return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);

xfs_fs_nr_cached_objects(
	struct super_block	*sb)
	return xfs_reclaim_inodes_count(XFS_M(sb));

xfs_fs_free_cached_objects(
	struct super_block	*sb,
	xfs_reclaim_inodes_nr(XFS_M(sb), nr_to_scan);

static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.write_inode		= xfs_fs_write_inode,
	.evict_inode		= xfs_fs_evict_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.remount_fs		= xfs_fs_remount,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,

static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.mount			= xfs_fs_mount,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
xfs_init_zones(void)
	xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
	if (!xfs_ioend_zone)

	xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
	if (!xfs_ioend_pool)
		goto out_destroy_ioend_zone;

	xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
	if (!xfs_log_ticket_zone)
		goto out_destroy_ioend_pool;

	xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t),
						"xfs_bmap_free_item");
	if (!xfs_bmap_free_item_zone)
		goto out_destroy_log_ticket_zone;

	xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
	if (!xfs_btree_cur_zone)
		goto out_destroy_bmap_free_item_zone;

	xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
	if (!xfs_da_state_zone)
		goto out_destroy_btree_cur_zone;

	xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");
	if (!xfs_dabuf_zone)
		goto out_destroy_da_state_zone;

	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
	if (!xfs_ifork_zone)
		goto out_destroy_dabuf_zone;

	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
	if (!xfs_trans_zone)
		goto out_destroy_ifork_zone;

	xfs_log_item_desc_zone =
		kmem_zone_init(sizeof(struct xfs_log_item_desc),
			       "xfs_log_item_desc");
	if (!xfs_log_item_desc_zone)
		goto out_destroy_trans_zone;

	/*
	 * The size of the zone allocated buf log item is the maximum
	 * size possible under XFS.  This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_zone = kmem_zone_init((sizeof(xfs_buf_log_item_t) +
				(((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) /
				  NBWORD) * sizeof(int))), "xfs_buf_item");
	if (!xfs_buf_item_zone)
		goto out_destroy_log_item_desc_zone;
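	/*
	 * Rough sizing sketch (illustrative; assumes the usual constants
	 * XFS_MAX_BLOCKSIZE = 64k, XFS_BLF_CHUNK = 128 and NBWORD = 32):
	 * 64k / 128 = 512 dirty-chunk bits, i.e. 16 extra ints (64 bytes)
	 * of bitmap on top of sizeof(xfs_buf_log_item_t) per zone object.
	 */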
	xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
			((XFS_EFD_MAX_FAST_EXTENTS - 1) *
				 sizeof(xfs_extent_t))), "xfs_efd_item");
		goto out_destroy_buf_item_zone;

	xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
			((XFS_EFI_MAX_FAST_EXTENTS - 1) *
				sizeof(xfs_extent_t))), "xfs_efi_item");
		goto out_destroy_efd_zone;

		kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
			KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD,
			xfs_fs_inode_init_once);
	if (!xfs_inode_zone)
		goto out_destroy_efi_zone;

		kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
					KM_ZONE_SPREAD, NULL);
		goto out_destroy_inode_zone;

 out_destroy_inode_zone:
	kmem_zone_destroy(xfs_inode_zone);
 out_destroy_efi_zone:
	kmem_zone_destroy(xfs_efi_zone);
 out_destroy_efd_zone:
	kmem_zone_destroy(xfs_efd_zone);
 out_destroy_buf_item_zone:
	kmem_zone_destroy(xfs_buf_item_zone);
 out_destroy_log_item_desc_zone:
	kmem_zone_destroy(xfs_log_item_desc_zone);
 out_destroy_trans_zone:
	kmem_zone_destroy(xfs_trans_zone);
 out_destroy_ifork_zone:
	kmem_zone_destroy(xfs_ifork_zone);
 out_destroy_dabuf_zone:
	kmem_zone_destroy(xfs_dabuf_zone);
 out_destroy_da_state_zone:
	kmem_zone_destroy(xfs_da_state_zone);
 out_destroy_btree_cur_zone:
	kmem_zone_destroy(xfs_btree_cur_zone);
 out_destroy_bmap_free_item_zone:
	kmem_zone_destroy(xfs_bmap_free_item_zone);
 out_destroy_log_ticket_zone:
	kmem_zone_destroy(xfs_log_ticket_zone);
 out_destroy_ioend_pool:
	mempool_destroy(xfs_ioend_pool);
 out_destroy_ioend_zone:
	kmem_zone_destroy(xfs_ioend_zone);
xfs_destroy_zones(void)
	kmem_zone_destroy(xfs_ili_zone);
	kmem_zone_destroy(xfs_inode_zone);
	kmem_zone_destroy(xfs_efi_zone);
	kmem_zone_destroy(xfs_efd_zone);
	kmem_zone_destroy(xfs_buf_item_zone);
	kmem_zone_destroy(xfs_log_item_desc_zone);
	kmem_zone_destroy(xfs_trans_zone);
	kmem_zone_destroy(xfs_ifork_zone);
	kmem_zone_destroy(xfs_dabuf_zone);
	kmem_zone_destroy(xfs_da_state_zone);
	kmem_zone_destroy(xfs_btree_cur_zone);
	kmem_zone_destroy(xfs_bmap_free_item_zone);
	kmem_zone_destroy(xfs_log_ticket_zone);
	mempool_destroy(xfs_ioend_pool);
	kmem_zone_destroy(xfs_ioend_zone);
xfs_init_workqueues(void)
	/*
	 * max_active is set to 8 to give enough concurrency to allow
	 * multiple work operations on each CPU to run.  This allows multiple
	 * filesystems to be running sync work concurrently, and scales with
	 * the number of CPUs in the system.
	 */
	xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);

	xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8);
		goto out_destroy_syncd;

	destroy_workqueue(xfs_syncd_wq);

xfs_destroy_workqueues(void)
	destroy_workqueue(xfs_ail_wq);
	destroy_workqueue(xfs_syncd_wq);
	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	error = xfs_init_zones();

	error = xfs_init_workqueues();
		goto out_destroy_zones;

	error = xfs_mru_cache_init();
		goto out_destroy_wq;

	error = xfs_filestream_init();
		goto out_mru_cache_uninit;

	error = xfs_buf_init();
		goto out_filestream_uninit;

	error = xfs_init_procfs();
		goto out_buf_terminate;

	error = xfs_sysctl_register();
		goto out_cleanup_procfs;

	error = register_filesystem(&xfs_fs_type);
		goto out_sysctl_unregister;

 out_sysctl_unregister:
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_buf_terminate();
 out_filestream_uninit:
	xfs_filestream_uninit();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_zones();

	unregister_filesystem(&xfs_fs_type);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_buf_terminate();
	xfs_filestream_uninit();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_zones();

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");