/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/security.h>
#include <linux/xattr.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include <linux/btrfs.h>
#include <linux/uaccess.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "volumes.h"
#include "inode-map.h"
#include "rcu-string.h"
#include "dev-replace.h"
/* If we have a 32-bit userspace and 64-bit kernel, then the UAPI
 * structures are incorrect, as the timespec structure from userspace
 * is 4 bytes too small. We define these alternatives here to teach
 * the kernel about the 32-bit struct packing.
 */
struct btrfs_ioctl_timespec_32 {
	__u64 sec;
	__s32 nsec;
} __attribute__ ((__packed__));
struct btrfs_ioctl_received_subvol_args_32 {
	char	uuid[BTRFS_UUID_SIZE];	/* in */
	__u64	stransid;		/* in */
	__u64	rtransid;		/* out */
	struct btrfs_ioctl_timespec_32 stime; /* in */
	struct btrfs_ioctl_timespec_32 rtime; /* out */
	__u64	flags;			/* in */
	__u64	reserved[16];		/* in */
} __attribute__ ((__packed__));
#define BTRFS_IOC_SET_RECEIVED_SUBVOL_32 _IOWR(BTRFS_IOCTL_MAGIC, 37, \
				struct btrfs_ioctl_received_subvol_args_32)
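
/*
 * Illustrative userspace sketch (not part of this file): a 32-bit
 * process on a 64-bit kernel reaches this path by issuing the packed
 * variant of the ioctl. The variable names are hypothetical; error
 * handling is omitted.
 *
 *	struct btrfs_ioctl_received_subvol_args_32 args = { 0 };
 *	memcpy(args.uuid, received_uuid, BTRFS_UUID_SIZE);
 *	args.stransid = stransid;
 *	ioctl(subvol_fd, BTRFS_IOC_SET_RECEIVED_SUBVOL_32, &args);
 */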
static int btrfs_clone(struct inode *src, struct inode *inode,
		       u64 off, u64 olen, u64 olen_aligned, u64 destoff,
		       int no_time_update);
/* Mask out flags that are inappropriate for the given type of inode. */
static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & ~FS_DIRSYNC_FL;
	else
		return flags & (FS_NODUMP_FL | FS_NOATIME_FL);
}
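
/*
 * Example of the masking above: for a symlink only FS_NODUMP_FL and
 * FS_NOATIME_FL survive, so a FS_IOC_SETFLAGS request of FS_IMMUTABLE_FL
 * on a symlink is silently reduced to zero rather than rejected.
 */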
/*
 * Export inode flags to the format expected by the FS_IOC_GETFLAGS ioctl.
 */
static unsigned int btrfs_flags_to_ioctl(unsigned int flags)
{
	unsigned int iflags = 0;

	if (flags & BTRFS_INODE_SYNC)
		iflags |= FS_SYNC_FL;
	if (flags & BTRFS_INODE_IMMUTABLE)
		iflags |= FS_IMMUTABLE_FL;
	if (flags & BTRFS_INODE_APPEND)
		iflags |= FS_APPEND_FL;
	if (flags & BTRFS_INODE_NODUMP)
		iflags |= FS_NODUMP_FL;
	if (flags & BTRFS_INODE_NOATIME)
		iflags |= FS_NOATIME_FL;
	if (flags & BTRFS_INODE_DIRSYNC)
		iflags |= FS_DIRSYNC_FL;
	if (flags & BTRFS_INODE_NODATACOW)
		iflags |= FS_NOCOW_FL;

	if ((flags & BTRFS_INODE_COMPRESS) && !(flags & BTRFS_INODE_NOCOMPRESS))
		iflags |= FS_COMPR_FL;
	else if (flags & BTRFS_INODE_NOCOMPRESS)
		iflags |= FS_NOCOMP_FL;

	return iflags;
}
/*
 * Update inode->i_flags based on the btrfs internal flags.
 */
void btrfs_update_iflags(struct inode *inode)
{
	struct btrfs_inode *ip = BTRFS_I(inode);
	unsigned int new_fl = 0;

	if (ip->flags & BTRFS_INODE_SYNC)
		new_fl |= S_SYNC;
	if (ip->flags & BTRFS_INODE_IMMUTABLE)
		new_fl |= S_IMMUTABLE;
	if (ip->flags & BTRFS_INODE_APPEND)
		new_fl |= S_APPEND;
	if (ip->flags & BTRFS_INODE_NOATIME)
		new_fl |= S_NOATIME;
	if (ip->flags & BTRFS_INODE_DIRSYNC)
		new_fl |= S_DIRSYNC;

	set_mask_bits(&inode->i_flags,
		      S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC,
		      new_fl);
}
/*
 * Inherit flags from the parent inode.
 *
 * Currently only the compression flags and the cow flags are inherited.
 */
void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
{
	unsigned int flags;

	if (!dir)
		return;

	flags = BTRFS_I(dir)->flags;

	if (flags & BTRFS_INODE_NOCOMPRESS) {
		BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
		BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
	} else if (flags & BTRFS_INODE_COMPRESS) {
		BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
		BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
	}

	if (flags & BTRFS_INODE_NODATACOW) {
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
		if (S_ISREG(inode->i_mode))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
	}

	btrfs_update_iflags(inode);
}
static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
{
	struct btrfs_inode *ip = BTRFS_I(file_inode(file));
	unsigned int flags = btrfs_flags_to_ioctl(ip->flags);

	if (copy_to_user(arg, &flags, sizeof(flags)))
		return -EFAULT;
	return 0;
}
static int check_flags(unsigned int flags)
{
	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
		      FS_NOATIME_FL | FS_NODUMP_FL | \
		      FS_SYNC_FL | FS_DIRSYNC_FL | \
		      FS_NOCOMP_FL | FS_COMPR_FL |
		      FS_NOCOW_FL))
		return -EOPNOTSUPP;

	if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
		return -EINVAL;

	return 0;
}
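
/*
 * Example: check_flags() rejects the contradictory combination
 * FS_COMPR_FL | FS_NOCOMP_FL with -EINVAL, and any flag outside the
 * supported set above (for instance FS_PROJINHERIT_FL) with
 * -EOPNOTSUPP.
 */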
static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_inode *ip = BTRFS_I(inode);
	struct btrfs_root *root = ip->root;
	struct btrfs_trans_handle *trans;
	unsigned int flags, oldflags;
	int ret;
	u64 ip_oldflags;
	unsigned int i_oldflags;
	umode_t mode;

	if (!inode_owner_or_capable(inode))
		return -EPERM;

	if (btrfs_root_readonly(root))
		return -EROFS;

	if (copy_from_user(&flags, arg, sizeof(flags)))
		return -EFAULT;

	ret = check_flags(flags);
	if (ret)
		return ret;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	ip_oldflags = ip->flags;
	i_oldflags = inode->i_flags;
	mode = inode->i_mode;

	flags = btrfs_mask_flags(inode->i_mode, flags);
	oldflags = btrfs_flags_to_ioctl(ip->flags);
	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			ret = -EPERM;
			goto out_unlock;
		}
	}

	if (flags & FS_SYNC_FL)
		ip->flags |= BTRFS_INODE_SYNC;
	else
		ip->flags &= ~BTRFS_INODE_SYNC;
	if (flags & FS_IMMUTABLE_FL)
		ip->flags |= BTRFS_INODE_IMMUTABLE;
	else
		ip->flags &= ~BTRFS_INODE_IMMUTABLE;
	if (flags & FS_APPEND_FL)
		ip->flags |= BTRFS_INODE_APPEND;
	else
		ip->flags &= ~BTRFS_INODE_APPEND;
	if (flags & FS_NODUMP_FL)
		ip->flags |= BTRFS_INODE_NODUMP;
	else
		ip->flags &= ~BTRFS_INODE_NODUMP;
	if (flags & FS_NOATIME_FL)
		ip->flags |= BTRFS_INODE_NOATIME;
	else
		ip->flags &= ~BTRFS_INODE_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		ip->flags |= BTRFS_INODE_DIRSYNC;
	else
		ip->flags &= ~BTRFS_INODE_DIRSYNC;
	if (flags & FS_NOCOW_FL) {
		if (S_ISREG(mode)) {
			/*
			 * It's safe to turn csums off here, no extents exist.
			 * Otherwise we want the flag to reflect the real COW
			 * status of the file and will not set it.
			 */
			if (inode->i_size == 0)
				ip->flags |= BTRFS_INODE_NODATACOW
					   | BTRFS_INODE_NODATASUM;
		} else {
			ip->flags |= BTRFS_INODE_NODATACOW;
		}
	} else {
		/*
		 * Revert back under the same assumptions as above
		 */
		if (S_ISREG(mode)) {
			if (inode->i_size == 0)
				ip->flags &= ~(BTRFS_INODE_NODATACOW
					     | BTRFS_INODE_NODATASUM);
		} else {
			ip->flags &= ~BTRFS_INODE_NODATACOW;
		}
	}

	/*
	 * The COMPRESS flag can only be changed by users, while the NOCOMPRESS
	 * flag may be changed automatically if the compression code won't make
	 * things smaller.
	 */
	if (flags & FS_NOCOMP_FL) {
		ip->flags &= ~BTRFS_INODE_COMPRESS;
		ip->flags |= BTRFS_INODE_NOCOMPRESS;

		ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
		if (ret && ret != -ENODATA)
			goto out_drop;
	} else if (flags & FS_COMPR_FL) {
		const char *comp;

		ip->flags |= BTRFS_INODE_COMPRESS;
		ip->flags &= ~BTRFS_INODE_NOCOMPRESS;

		if (root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
			comp = "lzo";
		else
			comp = "zlib";
		ret = btrfs_set_prop(inode, "btrfs.compression",
				     comp, strlen(comp), 0);
		if (ret)
			goto out_drop;
	} else {
		ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
		if (ret && ret != -ENODATA)
			goto out_drop;
		ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
	}

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_drop;
	}

	btrfs_update_iflags(inode);
	inode_inc_iversion(inode);
	inode->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode(trans, root, inode);

	btrfs_end_transaction(trans, root);
out_drop:
	if (ret) {
		ip->flags = ip_oldflags;
		inode->i_flags = i_oldflags;
	}

out_unlock:
	mutex_unlock(&inode->i_mutex);
	mnt_drop_write_file(file);
	return ret;
}
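
/*
 * Illustrative userspace sketch (not part of this file): toggling the
 * per-file NOCOW bit through the generic flags ioctls. Error handling
 * is omitted.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	unsigned int fl;
 *	ioctl(fd, FS_IOC_GETFLAGS, &fl);
 *	fl |= FS_NOCOW_FL;	(csums are only dropped for empty files)
 *	ioctl(fd, FS_IOC_SETFLAGS, &fl);
 */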
static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
{
	struct inode *inode = file_inode(file);

	return put_user(inode->i_generation, arg);
}
static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
	struct btrfs_device *device;
	struct request_queue *q;
	struct fstrim_range range;
	u64 minlen = ULLONG_MAX;
	u64 num_devices = 0;
	u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
				dev_list) {
		if (!device->bdev)
			continue;
		q = bdev_get_queue(device->bdev);
		if (blk_queue_discard(q)) {
			num_devices++;
			minlen = min((u64)q->limits.discard_granularity,
				     minlen);
		}
	}
	rcu_read_unlock();

	if (!num_devices)
		return -EOPNOTSUPP;
	if (copy_from_user(&range, arg, sizeof(range)))
		return -EFAULT;
	if (range.start > total_bytes ||
	    range.len < fs_info->sb->s_blocksize)
		return -EINVAL;

	range.len = min(range.len, total_bytes - range.start);
	range.minlen = max(range.minlen, minlen);
	ret = btrfs_trim_fs(fs_info->tree_root, &range);
	if (ret < 0)
		return ret;

	if (copy_to_user(arg, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}
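
/*
 * Illustrative userspace sketch (not part of this file): trimming the
 * whole filesystem, as fstrim(8) does.
 *
 *	#include <linux/fs.h>
 *
 *	struct fstrim_range range = { .start = 0, .len = ULLONG_MAX,
 *				      .minlen = 0 };
 *	if (ioctl(fd, FITRIM, &range) == 0)
 *		printf("%llu bytes trimmed\n", range.len);
 */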
int btrfs_is_empty_uuid(u8 *uuid)
{
	int i;

	for (i = 0; i < BTRFS_UUID_SIZE; i++) {
		if (uuid[i])
			return 0;
	}
	return 1;
}
static noinline int create_subvol(struct inode *dir,
				  struct dentry *dentry,
				  char *name, int namelen,
				  u64 *async_transid,
				  struct btrfs_qgroup_inherit *inherit)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_root_item root_item;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *new_root;
	struct btrfs_block_rsv block_rsv;
	struct timespec cur_time = CURRENT_TIME;
	struct inode *inode;
	int ret;
	int err;
	u64 objectid;
	u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
	u64 index = 0;
	u64 qgroup_reserved;
	uuid_le new_uuid;

	ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid);
	if (ret)
		return ret;

	/*
	 * Don't create a subvolume whose level is not zero, or qgroup
	 * accounting will be screwed up, since it assumes a subvolume
	 * qgroup's level is 0.
	 */
	if (btrfs_qgroup_level(objectid))
		return -ENOSPC;

	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
	/*
	 * The same as the snapshot creation, please see the comment
	 * of create_snapshot().
	 */
	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv,
					       8, &qgroup_reserved, false);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_subvolume_release_metadata(root, &block_rsv,
						 qgroup_reserved);
		return ret;
	}
	trans->block_rsv = &block_rsv;
	trans->bytes_reserved = block_rsv.size;

	ret = btrfs_qgroup_inherit(trans, root->fs_info, 0, objectid, inherit);
	if (ret)
		goto fail;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		goto fail;
	}

	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, objectid);

	write_extent_buffer(leaf, root->fs_info->fsid, btrfs_header_fsid(),
			    BTRFS_FSID_SIZE);
	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
			    btrfs_header_chunk_tree_uuid(leaf),
			    BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	memset(&root_item, 0, sizeof(root_item));

	inode_item = &root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item, root->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_flags(&root_item, 0);
	btrfs_set_root_limit(&root_item, 0);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_INODE_ROOT_ITEM_INIT);

	btrfs_set_root_bytenr(&root_item, leaf->start);
	btrfs_set_root_generation(&root_item, trans->transid);
	btrfs_set_root_level(&root_item, 0);
	btrfs_set_root_refs(&root_item, 1);
	btrfs_set_root_used(&root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root_item, 0);

	btrfs_set_root_generation_v2(&root_item,
			btrfs_root_generation(&root_item));
	uuid_le_gen(&new_uuid);
	memcpy(root_item.uuid, new_uuid.b, BTRFS_UUID_SIZE);
	btrfs_set_stack_timespec_sec(&root_item.otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&root_item.otime, cur_time.tv_nsec);
	root_item.ctime = root_item.otime;
	btrfs_set_root_ctransid(&root_item, trans->transid);
	btrfs_set_root_otransid(&root_item, trans->transid);

	btrfs_tree_unlock(leaf);
	free_extent_buffer(leaf);
	leaf = NULL;

	btrfs_set_root_dirid(&root_item, new_dirid);

	key.objectid = objectid;
	key.offset = 0;
	key.type = BTRFS_ROOT_ITEM_KEY;
	ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
				&root_item);
	if (ret)
		goto fail;

	key.offset = (u64)-1;
	new_root = btrfs_read_fs_root_no_name(root->fs_info, &key);
	if (IS_ERR(new_root)) {
		ret = PTR_ERR(new_root);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_record_root_in_trans(trans, new_root);

	ret = btrfs_create_subvol_root(trans, new_root, root, new_dirid);
	if (ret) {
		/* We potentially lose an unused inode item here */
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(dir, &index);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_insert_dir_item(trans, root,
				    name, namelen, dir, &key,
				    BTRFS_FT_DIR, index);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_i_size_write(dir, dir->i_size + namelen * 2);
	ret = btrfs_update_inode(trans, root, dir);
	BUG_ON(ret);

	ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
				 objectid, root->root_key.objectid,
				 btrfs_ino(dir), index, name, namelen);
	BUG_ON(ret);

	ret = btrfs_uuid_tree_add(trans, root->fs_info->uuid_root,
				  root_item.uuid, BTRFS_UUID_KEY_SUBVOL,
				  objectid);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);

fail:
	trans->block_rsv = NULL;
	trans->bytes_reserved = 0;
	btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);

	if (async_transid) {
		*async_transid = trans->transid;
		err = btrfs_commit_transaction_async(trans, root, 1);
		if (err)
			err = btrfs_commit_transaction(trans, root);
	} else {
		err = btrfs_commit_transaction(trans, root);
	}
	if (err && !ret)
		ret = err;

	if (!ret) {
		inode = btrfs_lookup_dentry(dir, dentry);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
		d_instantiate(dentry, inode);
	}
	return ret;
}
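
/*
 * Illustrative userspace sketch (not part of this file): creating a
 * subvolume with the v1 ioctl against an open fd of the parent
 * directory.
 *
 *	struct btrfs_ioctl_vol_args args = { 0 };
 *	strncpy(args.name, "mysubvol", BTRFS_PATH_NAME_MAX);
 *	ioctl(parent_dir_fd, BTRFS_IOC_SUBVOL_CREATE, &args);
 */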
static void btrfs_wait_for_no_snapshoting_writes(struct btrfs_root *root)
{
	s64 writers;
	DEFINE_WAIT(wait);

	do {
		prepare_to_wait(&root->subv_writers->wait, &wait,
				TASK_UNINTERRUPTIBLE);

		writers = percpu_counter_sum(&root->subv_writers->counter);
		if (writers)
			schedule();

		finish_wait(&root->subv_writers->wait, &wait);
	} while (writers);
}
static int create_snapshot(struct btrfs_root *root, struct inode *dir,
			   struct dentry *dentry, char *name, int namelen,
			   u64 *async_transid, bool readonly,
			   struct btrfs_qgroup_inherit *inherit)
{
	struct inode *inode;
	struct btrfs_pending_snapshot *pending_snapshot;
	struct btrfs_trans_handle *trans;
	int ret;

	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return -EINVAL;

	atomic_inc(&root->will_be_snapshoted);
	smp_mb__after_atomic();
	btrfs_wait_for_no_snapshoting_writes(root);

	ret = btrfs_start_delalloc_inodes(root, 0);
	if (ret)
		goto out;

	btrfs_wait_ordered_extents(root, -1);

	pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
	if (!pending_snapshot) {
		ret = -ENOMEM;
		goto out;
	}

	btrfs_init_block_rsv(&pending_snapshot->block_rsv,
			     BTRFS_BLOCK_RSV_TEMP);
	/*
	 * 1 - parent dir inode
	 * 2 - dir entries
	 * 1 - root item
	 * 2 - root ref/backref
	 * 1 - root of snapshot
	 * 1 - UUID item
	 */
	ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
					&pending_snapshot->block_rsv, 8,
					&pending_snapshot->qgroup_reserved,
					false);
	if (ret)
		goto free;

	pending_snapshot->dentry = dentry;
	pending_snapshot->root = root;
	pending_snapshot->readonly = readonly;
	pending_snapshot->dir = dir;
	pending_snapshot->inherit = inherit;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto fail;
	}

	spin_lock(&root->fs_info->trans_lock);
	list_add(&pending_snapshot->list,
		 &trans->transaction->pending_snapshots);
	spin_unlock(&root->fs_info->trans_lock);
	if (async_transid) {
		*async_transid = trans->transid;
		ret = btrfs_commit_transaction_async(trans,
				     root->fs_info->extent_root, 1);
		if (ret)
			ret = btrfs_commit_transaction(trans, root);
	} else {
		ret = btrfs_commit_transaction(trans,
					       root->fs_info->extent_root);
	}
	if (ret)
		goto fail;

	ret = pending_snapshot->error;
	if (ret)
		goto fail;

	ret = btrfs_orphan_cleanup(pending_snapshot->snap);
	if (ret)
		goto fail;

	inode = btrfs_lookup_dentry(d_inode(dentry->d_parent), dentry);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto fail;
	}

	d_instantiate(dentry, inode);
	ret = 0;
fail:
	btrfs_subvolume_release_metadata(BTRFS_I(dir)->root,
					 &pending_snapshot->block_rsv,
					 pending_snapshot->qgroup_reserved);
free:
	kfree(pending_snapshot);
out:
	if (atomic_dec_and_test(&root->will_be_snapshoted))
		wake_up_atomic_t(&root->will_be_snapshoted);
	return ret;
}
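
/*
 * Illustrative userspace sketch (not part of this file): taking a
 * read-only snapshot, roughly what `btrfs subvolume snapshot -r` does.
 *
 *	struct btrfs_ioctl_vol_args_v2 args = { 0 };
 *	args.fd = open("/mnt/subvol", O_RDONLY);
 *	args.flags = BTRFS_SUBVOL_RDONLY;
 *	strncpy(args.name, "snap1", BTRFS_SUBVOL_NAME_MAX);
 *	ioctl(parent_dir_fd, BTRFS_IOC_SNAP_CREATE_V2, &args);
 */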
/* copy of may_delete in fs/namei.c
 * Check whether we can remove a link victim from directory dir, check
 * whether the type of victim is right.
 * 1. We can't do it if dir is read-only (done in permission())
 * 2. We should have write and exec permissions on dir
 * 3. We can't remove anything from append-only dir
 * 4. We can't do anything with immutable dir (done in permission())
 * 5. If the sticky bit on dir is set we should either
 *	a. be owner of dir, or
 *	b. be owner of victim, or
 *	c. have CAP_FOWNER capability
 * 6. If the victim is append-only or immutable we can't do anything with
 *    links pointing to it.
 * 7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
 * 8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
 * 9. We can't remove a root or mountpoint.
 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
 *     nfs_async_unlink().
 */
static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
{
	int error;

	if (d_really_is_negative(victim))
		return -ENOENT;

	BUG_ON(d_inode(victim->d_parent) != dir);
	audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);

	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;
	if (IS_APPEND(dir))
		return -EPERM;
	if (check_sticky(dir, d_inode(victim)) || IS_APPEND(d_inode(victim)) ||
	    IS_IMMUTABLE(d_inode(victim)) || IS_SWAPFILE(d_inode(victim)))
		return -EPERM;
	if (isdir) {
		if (!d_is_dir(victim))
			return -ENOTDIR;
		if (IS_ROOT(victim))
			return -EBUSY;
	} else if (d_is_dir(victim))
		return -EISDIR;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	if (victim->d_flags & DCACHE_NFSFS_RENAMED)
		return -EBUSY;
	return 0;
}
/* copy of may_create in fs/namei.c */
static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
{
	if (d_really_is_positive(child))
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}
/*
 * Create a new subvolume below @parent. This is largely modeled after
 * sys_mkdirat and vfs_mkdir, but we only do a single component lookup
 * inside this filesystem so it's quite a bit simpler.
 */
static noinline int btrfs_mksubvol(struct path *parent,
				   char *name, int namelen,
				   struct btrfs_root *snap_src,
				   u64 *async_transid, bool readonly,
				   struct btrfs_qgroup_inherit *inherit)
{
	struct inode *dir = d_inode(parent->dentry);
	struct dentry *dentry;
	int error;

	error = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
	if (error == -EINTR)
		return error;

	dentry = lookup_one_len(name, parent->dentry, namelen);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_unlock;

	error = -EEXIST;
	if (d_really_is_positive(dentry))
		goto out_dput;

	error = btrfs_may_create(dir, dentry);
	if (error)
		goto out_dput;

	/*
	 * even if this name doesn't exist, we may get hash collisions.
	 * check for them now when we can safely fail
	 */
	error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
					       dir->i_ino, name,
					       namelen);
	if (error)
		goto out_dput;

	down_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);

	if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
		goto out_up_read;

	if (snap_src) {
		error = create_snapshot(snap_src, dir, dentry, name, namelen,
					async_transid, readonly, inherit);
	} else {
		error = create_subvol(dir, dentry, name, namelen,
				      async_transid, inherit);
	}
	if (!error)
		fsnotify_mkdir(dir, dentry);
out_up_read:
	up_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);
out_dput:
	dput(dentry);
out_unlock:
	mutex_unlock(&dir->i_mutex);
	return error;
}
/*
 * When we're defragging a range, we don't want to kick it off again
 * if it is really just waiting for delalloc to send it down.
 * If we find a nice big extent or delalloc range for the bytes in the
 * file you want to defrag, we return 0 to let you know to skip this
 * part of the file.
 */
static int check_defrag_in_cache(struct inode *inode, u64 offset, u32 thresh)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 end;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
	read_unlock(&em_tree->lock);

	if (em) {
		end = extent_map_end(em);
		free_extent_map(em);
		if (end - offset > thresh)
			return 0;
	}
	/* if we already have a nice delalloc here, just stop */
	thresh /= 2;
	end = count_range_bits(io_tree, &offset, offset + thresh,
			       thresh, EXTENT_DELALLOC, 1);
	if (end >= thresh)
		return 0;
	return 1;
}
/*
 * helper function to walk through a file and find extents
 * newer than a specific transid, and smaller than thresh.
 *
 * This is used by the defragging code to find new and small
 * extents.
 */
static int find_new_extents(struct btrfs_root *root,
			    struct inode *inode, u64 newer_than,
			    u64 *off, u32 thresh)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	int type;
	int ret;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	min_key.objectid = ino;
	min_key.type = BTRFS_EXTENT_DATA_KEY;
	min_key.offset = *off;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, path, newer_than);
		if (ret != 0)
			goto none;
process_slot:
		if (min_key.objectid != ino)
			goto none;
		if (min_key.type != BTRFS_EXTENT_DATA_KEY)
			goto none;

		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_file_extent_item);

		type = btrfs_file_extent_type(leaf, extent);
		if (type == BTRFS_FILE_EXTENT_REG &&
		    btrfs_file_extent_num_bytes(leaf, extent) < thresh &&
		    check_defrag_in_cache(inode, min_key.offset, thresh)) {
			*off = min_key.offset;
			btrfs_free_path(path);
			return 0;
		}

		path->slots[0]++;
		if (path->slots[0] < btrfs_header_nritems(leaf)) {
			btrfs_item_key_to_cpu(leaf, &min_key, path->slots[0]);
			goto process_slot;
		}

		if (min_key.offset == (u64)-1)
			goto none;

		min_key.offset++;
		btrfs_release_path(path);
	}
none:
	btrfs_free_path(path);
	return -ENOENT;
}
static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em;
	u64 len = PAGE_CACHE_SIZE;

	/*
	 * hopefully we have this extent in the tree already, try without
	 * the full extent lock
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	read_unlock(&em_tree->lock);

	if (!em) {
		struct extent_state *cached = NULL;
		u64 end = start + len - 1;

		/* get the big lock and read metadata off disk */
		lock_extent_bits(io_tree, start, end, 0, &cached);
		em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
		unlock_extent_cached(io_tree, start, end, &cached, GFP_NOFS);

		if (IS_ERR(em))
			return NULL;
	}

	return em;
}
static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
{
	struct extent_map *next;
	bool ret = true;

	/* this is the last extent */
	if (em->start + em->len >= i_size_read(inode))
		return false;

	next = defrag_lookup_extent(inode, em->start + em->len);
	if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
		ret = false;
	else if ((em->block_start + em->block_len == next->block_start) &&
		 (em->block_len > 128 * 1024 && next->block_len > 128 * 1024))
		ret = false;

	free_extent_map(next);
	return ret;
}
static int should_defrag_range(struct inode *inode, u64 start, u32 thresh,
			       u64 *last_len, u64 *skip, u64 *defrag_end,
			       int compress)
{
	struct extent_map *em;
	int ret = 1;
	bool next_mergeable = true;
	bool prev_mergeable = true;

	/*
	 * make sure that once we start defragging an extent, we keep on
	 * defragging it
	 */
	if (start < *defrag_end)
		return 1;

	*skip = 0;

	em = defrag_lookup_extent(inode, start);
	if (!em)
		return 0;

	/* this will cover holes, and inline extents */
	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
		ret = 0;
		goto out;
	}

	if (!*defrag_end)
		prev_mergeable = false;

	next_mergeable = defrag_check_next_extent(inode, em);
	/*
	 * we hit a real extent, if it is big or the next extent is not a
	 * real extent, don't bother defragging it
	 */
	if (!compress && (*last_len == 0 || *last_len >= thresh) &&
	    (em->len >= thresh || (!next_mergeable && !prev_mergeable)))
		ret = 0;
out:
	/*
	 * last_len ends up being a counter of how many bytes we've defragged.
	 * every time we choose not to defrag an extent, we reset *last_len
	 * so that the next tiny extent will force a defrag.
	 *
	 * The end result of this is that tiny extents before a single big
	 * extent will force at least part of that big extent to be defragged.
	 */
	if (ret) {
		*defrag_end = extent_map_end(em);
	} else {
		*last_len = 0;
		*defrag_end = 0;
		*skip = extent_map_end(em);
	}

	free_extent_map(em);
	return ret;
}
/*
 * it doesn't do much good to defrag one or two pages
 * at a time.  This pulls in a nice chunk of pages
 * to COW and defrag.
 *
 * It also makes sure the delalloc code has enough
 * dirty data to avoid making new small extents as part
 * of the defrag.
 *
 * It's a good idea to start RA on this range
 * before calling this.
 */
static int cluster_pages_for_defrag(struct inode *inode,
				    struct page **pages,
				    unsigned long start_index,
				    unsigned long num_pages)
{
	unsigned long file_end;
	u64 isize = i_size_read(inode);
	u64 page_start;
	u64 page_end;
	u64 page_cnt;
	int ret;
	int i;
	int i_done;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_io_tree *tree;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);

	file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
	if (!isize || start_index > file_end)
		return 0;

	page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);

	ret = btrfs_delalloc_reserve_space(inode,
			start_index << PAGE_CACHE_SHIFT,
			page_cnt << PAGE_CACHE_SHIFT);
	if (ret)
		return ret;
	i_done = 0;
	tree = &BTRFS_I(inode)->io_tree;

	/* step one, lock all the pages */
	for (i = 0; i < page_cnt; i++) {
		struct page *page;
again:
		page = find_or_create_page(inode->i_mapping,
					   start_index + i, mask);
		if (!page)
			break;

		page_start = page_offset(page);
		page_end = page_start + PAGE_CACHE_SIZE - 1;
		while (1) {
			lock_extent_bits(tree, page_start, page_end,
					 0, &cached_state);
			ordered = btrfs_lookup_ordered_extent(inode,
							      page_start);
			unlock_extent_cached(tree, page_start, page_end,
					     &cached_state, GFP_NOFS);
			if (!ordered)
				break;

			unlock_page(page);
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			lock_page(page);
			/*
			 * we unlocked the page above, so we need to check if
			 * it was released or not.
			 */
			if (page->mapping != inode->i_mapping) {
				unlock_page(page);
				page_cache_release(page);
				goto again;
			}
		}

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				page_cache_release(page);
				ret = -EIO;
				break;
			}
		}

		if (page->mapping != inode->i_mapping) {
			unlock_page(page);
			page_cache_release(page);
			goto again;
		}

		pages[i] = page;
		i_done++;
	}
	if (!i_done || ret)
		goto out;

	if (!(inode->i_sb->s_flags & MS_ACTIVE))
		goto out;

	/*
	 * so now we have a nice long stream of locked
	 * and up to date pages, lets wait on them
	 */
	for (i = 0; i < i_done; i++)
		wait_on_page_writeback(pages[i]);

	page_start = page_offset(pages[0]);
	page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE;

	lock_extent_bits(&BTRFS_I(inode)->io_tree,
			 page_start, page_end - 1, 0, &cached_state);
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
			 page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
			 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
			 &cached_state, GFP_NOFS);

	if (i_done != page_cnt) {
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->outstanding_extents++;
		spin_unlock(&BTRFS_I(inode)->lock);
		btrfs_delalloc_release_space(inode,
				start_index << PAGE_CACHE_SHIFT,
				(page_cnt - i_done) << PAGE_CACHE_SHIFT);
	}

	set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1,
			  &cached_state, GFP_NOFS);

	unlock_extent_cached(&BTRFS_I(inode)->io_tree,
			     page_start, page_end - 1, &cached_state,
			     GFP_NOFS);

	for (i = 0; i < i_done; i++) {
		clear_page_dirty_for_io(pages[i]);
		ClearPageChecked(pages[i]);
		set_page_extent_mapped(pages[i]);
		set_page_dirty(pages[i]);
		unlock_page(pages[i]);
		page_cache_release(pages[i]);
	}
	return i_done;
out:
	for (i = 0; i < i_done; i++) {
		unlock_page(pages[i]);
		page_cache_release(pages[i]);
	}
	btrfs_delalloc_release_space(inode,
			start_index << PAGE_CACHE_SHIFT,
			page_cnt << PAGE_CACHE_SHIFT);
	return ret;
}
int btrfs_defrag_file(struct inode *inode, struct file *file,
		      struct btrfs_ioctl_defrag_range_args *range,
		      u64 newer_than, unsigned long max_to_defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct file_ra_state *ra = NULL;
	unsigned long last_index;
	u64 isize = i_size_read(inode);
	u64 last_len = 0;
	u64 skip = 0;
	u64 defrag_end = 0;
	u64 newer_off = range->start;
	unsigned long i;
	unsigned long ra_index = 0;
	int ret;
	int defrag_count = 0;
	int compress_type = BTRFS_COMPRESS_ZLIB;
	u32 extent_thresh = range->extent_thresh;
	unsigned long max_cluster = (256 * 1024) >> PAGE_CACHE_SHIFT;
	unsigned long cluster = max_cluster;
	u64 new_align = ~((u64)128 * 1024 - 1);
	struct page **pages = NULL;

	if (isize == 0)
		return 0;

	if (range->start >= isize)
		return -EINVAL;

	if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) {
		if (range->compress_type > BTRFS_COMPRESS_TYPES)
			return -EINVAL;
		if (range->compress_type)
			compress_type = range->compress_type;
	}

	if (extent_thresh == 0)
		extent_thresh = 256 * 1024;

	/*
	 * if we were not given a file, allocate a readahead
	 * context
	 */
	if (!file) {
		ra = kzalloc(sizeof(*ra), GFP_NOFS);
		if (!ra)
			return -ENOMEM;
		file_ra_state_init(ra, inode->i_mapping);
	} else {
		ra = &file->f_ra;
	}

	pages = kmalloc_array(max_cluster, sizeof(struct page *),
			      GFP_NOFS);
	if (!pages) {
		ret = -ENOMEM;
		goto out_ra;
	}

	/* find the last page to defrag */
	if (range->start + range->len > range->start) {
		last_index = min_t(u64, isize - 1,
			 range->start + range->len - 1) >> PAGE_CACHE_SHIFT;
	} else {
		last_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	}

	if (newer_than) {
		ret = find_new_extents(root, inode, newer_than,
				       &newer_off, 64 * 1024);
		if (!ret) {
			range->start = newer_off;
			/*
			 * we always align our defrag to help keep
			 * the extents in the file evenly spaced
			 */
			i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
		} else
			goto out_ra;
	} else {
		i = range->start >> PAGE_CACHE_SHIFT;
	}
	if (!max_to_defrag)
		max_to_defrag = last_index - i + 1;

	/*
	 * make writeback start from i, so the defrag range can be
	 * written sequentially.
	 */
	if (i < inode->i_mapping->writeback_index)
		inode->i_mapping->writeback_index = i;

	while (i <= last_index && defrag_count < max_to_defrag &&
	       (i < DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE))) {
		/*
		 * make sure we stop running if someone unmounts
		 * the FS
		 */
		if (!(inode->i_sb->s_flags & MS_ACTIVE))
			break;

		if (btrfs_defrag_cancelled(root->fs_info)) {
			btrfs_debug(root->fs_info, "defrag_file cancelled");
			ret = -EAGAIN;
			break;
		}

		if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
					 extent_thresh, &last_len, &skip,
					 &defrag_end, range->flags &
					 BTRFS_DEFRAG_RANGE_COMPRESS)) {
			unsigned long next;
			/*
			 * the should_defrag function tells us how much to skip
			 * bump our counter by the suggested amount
			 */
			next = DIV_ROUND_UP(skip, PAGE_CACHE_SIZE);
			i = max(i + 1, next);
			continue;
		}

		if (!newer_than) {
			cluster = (PAGE_CACHE_ALIGN(defrag_end) >>
				   PAGE_CACHE_SHIFT) - i;
			cluster = min(cluster, max_cluster);
		} else {
			cluster = max_cluster;
		}

		if (i + cluster > ra_index) {
			ra_index = max(i, ra_index);
			btrfs_force_ra(inode->i_mapping, ra, file, ra_index,
				       cluster);
			ra_index += cluster;
		}

		mutex_lock(&inode->i_mutex);
		if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)
			BTRFS_I(inode)->force_compress = compress_type;
		ret = cluster_pages_for_defrag(inode, pages, i, cluster);
		if (ret < 0) {
			mutex_unlock(&inode->i_mutex);
			goto out_ra;
		}

		defrag_count += ret;
		balance_dirty_pages_ratelimited(inode->i_mapping);
		mutex_unlock(&inode->i_mutex);

		if (newer_than) {
			if (newer_off == (u64)-1)
				break;

			if (ret > 0)
				i += ret;

			newer_off = max(newer_off + 1,
					(u64)i << PAGE_CACHE_SHIFT);

			ret = find_new_extents(root, inode,
					       newer_than, &newer_off,
					       64 * 1024);
			if (!ret) {
				range->start = newer_off;
				i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
			} else {
				break;
			}
		} else {
			if (ret > 0) {
				i += ret;
				last_len += ret << PAGE_CACHE_SHIFT;
			} else {
				i++;
				last_len = 0;
			}
		}
	}

	if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) {
		filemap_flush(inode->i_mapping);
		if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			     &BTRFS_I(inode)->runtime_flags))
			filemap_flush(inode->i_mapping);
	}

	if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
		/* the filemap_flush will queue IO into the worker threads, but
		 * we have to make sure the IO is actually started and that
		 * ordered extents get created before we return
		 */
		atomic_inc(&root->fs_info->async_submit_draining);
		while (atomic_read(&root->fs_info->nr_async_submits) ||
		       atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
			    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
		}
		atomic_dec(&root->fs_info->async_submit_draining);
	}

	if (range->compress_type == BTRFS_COMPRESS_LZO) {
		btrfs_set_fs_incompat(root->fs_info, COMPRESS_LZO);
	}

	ret = defrag_count;

out_ra:
	if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) {
		mutex_lock(&inode->i_mutex);
		BTRFS_I(inode)->force_compress = BTRFS_COMPRESS_NONE;
		mutex_unlock(&inode->i_mutex);
	}
	if (!file)
		kfree(ra);
	kfree(pages);
	return ret;
}
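
/*
 * Illustrative userspace sketch (not part of this file): requesting a
 * compressing defrag of a whole file, as `btrfs filesystem defragment
 * -czlib` does.
 *
 *	struct btrfs_ioctl_defrag_range_args range = { 0 };
 *	range.len = (__u64)-1;
 *	range.flags = BTRFS_DEFRAG_RANGE_COMPRESS;
 *	range.compress_type = BTRFS_COMPRESS_ZLIB;
 *	ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &range);
 */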
static noinline int btrfs_ioctl_resize(struct file *file,
					void __user *arg)
{
	u64 new_size;
	u64 old_size;
	u64 devid = 1;
	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
	struct btrfs_ioctl_vol_args *vol_args;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device = NULL;
	char *sizestr;
	char *retptr;
	char *devstr = NULL;
	int ret = 0;
	int mod = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
			1)) {
		mnt_drop_write_file(file);
		return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
	}

	mutex_lock(&root->fs_info->volume_mutex);
	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
		goto out;
	}

	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';

	sizestr = vol_args->name;
	devstr = strchr(sizestr, ':');
	if (devstr) {
		sizestr = devstr + 1;
		*devstr = '\0';
		devstr = vol_args->name;
		ret = kstrtoull(devstr, 10, &devid);
		if (ret)
			goto out_free;
		if (!devid) {
			ret = -EINVAL;
			goto out_free;
		}
		btrfs_info(root->fs_info, "resizing devid %llu", devid);
	}

	device = btrfs_find_device(root->fs_info, devid, NULL, NULL);
	if (!device) {
		btrfs_info(root->fs_info, "resizer unable to find device %llu",
			   devid);
		ret = -ENODEV;
		goto out_free;
	}

	if (!device->writeable) {
		btrfs_info(root->fs_info,
			   "resizer unable to apply on readonly device %llu",
			   devid);
		ret = -EPERM;
		goto out_free;
	}

	if (!strcmp(sizestr, "max")) {
		new_size = device->bdev->bd_inode->i_size;
	} else {
		if (sizestr[0] == '-') {
			mod = -1;
			sizestr++;
		} else if (sizestr[0] == '+') {
			mod = 1;
			sizestr++;
		}
		new_size = memparse(sizestr, &retptr);
		if (*retptr != '\0' || new_size == 0) {
			ret = -EINVAL;
			goto out_free;
		}
	}

	if (device->is_tgtdev_for_dev_replace) {
		ret = -EPERM;
		goto out_free;
	}

	old_size = btrfs_device_get_total_bytes(device);

	if (mod < 0) {
		if (new_size > old_size) {
			ret = -EINVAL;
			goto out_free;
		}
		new_size = old_size - new_size;
	} else if (mod > 0) {
		if (new_size > ULLONG_MAX - old_size) {
			ret = -ERANGE;
			goto out_free;
		}
		new_size = old_size + new_size;
	}

	if (new_size < 256 * 1024 * 1024) {
		ret = -EINVAL;
		goto out_free;
	}
	if (new_size > device->bdev->bd_inode->i_size) {
		ret = -EFBIG;
		goto out_free;
	}

	new_size = div_u64(new_size, root->sectorsize);
	new_size *= root->sectorsize;

	btrfs_info_in_rcu(root->fs_info, "new size for %s is %llu",
			  rcu_str_deref(device->name), new_size);

	if (new_size > old_size) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			goto out_free;
		}
		ret = btrfs_grow_device(trans, device, new_size);
		btrfs_commit_transaction(trans, root);
	} else if (new_size < old_size) {
		ret = btrfs_shrink_device(device, new_size);
	} /* equal: nothing to do */

out_free:
	kfree(vol_args);
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
	mnt_drop_write_file(file);
	return ret;
}
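
/*
 * Illustrative userspace sketch (not part of this file): the name field
 * carries "[devid:]size", so growing device 2 by one gigabyte looks
 * like:
 *
 *	struct btrfs_ioctl_vol_args args = { 0 };
 *	strncpy(args.name, "2:+1g", BTRFS_PATH_NAME_MAX);
 *	ioctl(fd, BTRFS_IOC_RESIZE, &args);
 */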
static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
				char *name, unsigned long fd, int subvol,
				u64 *transid, bool readonly,
				struct btrfs_qgroup_inherit *inherit)
{
	int namelen;
	int ret = 0;

	ret = mnt_want_write_file(file);
	if (ret)
		goto out;

	namelen = strlen(name);
	if (strchr(name, '/')) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (name[0] == '.' &&
	   (namelen == 1 || (name[1] == '.' && namelen == 2))) {
		ret = -EEXIST;
		goto out_drop_write;
	}

	if (subvol) {
		ret = btrfs_mksubvol(&file->f_path, name, namelen,
				     NULL, transid, readonly, inherit);
	} else {
		struct fd src = fdget(fd);
		struct inode *src_inode;
		if (!src.file) {
			ret = -EINVAL;
			goto out_drop_write;
		}

		src_inode = file_inode(src.file);
		if (src_inode->i_sb != file_inode(file)->i_sb) {
			btrfs_info(BTRFS_I(src_inode)->root->fs_info,
				   "Snapshot src from another FS");
			ret = -EXDEV;
		} else if (!inode_owner_or_capable(src_inode)) {
			/*
			 * Subvolume creation is not restricted, but snapshots
			 * are limited to own subvolumes only
			 */
			ret = -EPERM;
		} else {
			ret = btrfs_mksubvol(&file->f_path, name, namelen,
					     BTRFS_I(src_inode)->root,
					     transid, readonly, inherit);
		}
		fdput(src);
	}
out_drop_write:
	mnt_drop_write_file(file);
out:
	return ret;
}
static noinline int btrfs_ioctl_snap_create(struct file *file,
					    void __user *arg, int subvol)
{
	struct btrfs_ioctl_vol_args *vol_args;
	int ret;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';

	ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
					      vol_args->fd, subvol,
					      NULL, false, NULL);

	kfree(vol_args);
	return ret;
}
static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
					       void __user *arg, int subvol)
{
	struct btrfs_ioctl_vol_args_v2 *vol_args;
	int ret;
	u64 transid = 0;
	u64 *ptr = NULL;
	bool readonly = false;
	struct btrfs_qgroup_inherit *inherit = NULL;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';

	if (vol_args->flags &
	    ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY |
	      BTRFS_SUBVOL_QGROUP_INHERIT)) {
		ret = -EOPNOTSUPP;
		goto free_args;
	}

	if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC)
		ptr = &transid;
	if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
		readonly = true;
	if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
		if (vol_args->size > PAGE_CACHE_SIZE) {
			ret = -EINVAL;
			goto free_args;
		}
		inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
		if (IS_ERR(inherit)) {
			ret = PTR_ERR(inherit);
			goto free_args;
		}
	}

	ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
					      vol_args->fd, subvol, ptr,
					      readonly, inherit);
	if (ret)
		goto free_inherit;

	if (ptr && copy_to_user(arg +
				offsetof(struct btrfs_ioctl_vol_args_v2,
					transid),
				ptr, sizeof(*ptr)))
		ret = -EFAULT;

free_inherit:
	kfree(inherit);
free_args:
	kfree(vol_args);
	return ret;
}
static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
						void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	u64 flags = 0;

	if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
		return -EINVAL;

	down_read(&root->fs_info->subvol_sem);
	if (btrfs_root_readonly(root))
		flags |= BTRFS_SUBVOL_RDONLY;
	up_read(&root->fs_info->subvol_sem);

	if (copy_to_user(arg, &flags, sizeof(flags)))
		ret = -EFAULT;

	return ret;
}
static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
						void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 root_flags;
	u64 flags;
	int ret = 0;

	if (!inode_owner_or_capable(inode))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		goto out;

	if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (copy_from_user(&flags, arg, sizeof(flags))) {
		ret = -EFAULT;
		goto out_drop_write;
	}

	if (flags & BTRFS_SUBVOL_CREATE_ASYNC) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (flags & ~BTRFS_SUBVOL_RDONLY) {
		ret = -EOPNOTSUPP;
		goto out_drop_write;
	}

	down_write(&root->fs_info->subvol_sem);

	/* nothing to do */
	if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
		goto out_drop_sem;

	root_flags = btrfs_root_flags(&root->root_item);
	if (flags & BTRFS_SUBVOL_RDONLY) {
		btrfs_set_root_flags(&root->root_item,
				     root_flags | BTRFS_ROOT_SUBVOL_RDONLY);
	} else {
		/*
		 * Block RO -> RW transition if this subvolume is involved in
		 * a send operation.
		 */
		spin_lock(&root->root_item_lock);
		if (root->send_in_progress == 0) {
			btrfs_set_root_flags(&root->root_item,
				     root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY);
			spin_unlock(&root->root_item_lock);
		} else {
			spin_unlock(&root->root_item_lock);
			btrfs_warn(root->fs_info,
			"Attempt to set subvolume %llu read-write during send",
				   root->root_key.objectid);
			ret = -EPERM;
			goto out_drop_sem;
		}
	}

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_reset;
	}

	ret = btrfs_update_root(trans, root->fs_info->tree_root,
				&root->root_key, &root->root_item);

	btrfs_commit_transaction(trans, root);
out_reset:
	if (ret)
		btrfs_set_root_flags(&root->root_item, root_flags);
out_drop_sem:
	up_write(&root->fs_info->subvol_sem);
out_drop_write:
	mnt_drop_write_file(file);
out:
	return ret;
}
/*
 * helper to check if the subvolume references other subvolumes or if
 * it's the default subvolume
 */
static noinline int may_destroy_subvol(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 dir_id;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Make sure this root isn't set as the default subvol */
	dir_id = btrfs_super_root_dir(root->fs_info->super_copy);
	di = btrfs_lookup_dir_item(NULL, root->fs_info->tree_root, path,
				   dir_id, "default", 7, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
		if (key.objectid == root->root_key.objectid) {
			ret = -EPERM;
			btrfs_err(root->fs_info, "deleting default subvolume "
				  "%llu is not allowed", key.objectid);
			goto out;
		}
		btrfs_release_path(path);
	}

	key.objectid = root->root_key.objectid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root->fs_info->tree_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = 0;
	if (path->slots[0] > 0) {
		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == root->root_key.objectid &&
		    key.type == BTRFS_ROOT_REF_KEY)
			ret = -ENOTEMPTY;
	}
out:
	btrfs_free_path(path);
	return ret;
}
static noinline int key_in_sk(struct btrfs_key *key,
			      struct btrfs_ioctl_search_key *sk)
{
	struct btrfs_key test;
	int ret;

	test.objectid = sk->min_objectid;
	test.type = sk->min_type;
	test.offset = sk->min_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret < 0)
		return 0;

	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret > 0)
		return 0;
	return 1;
}
static noinline int copy_to_sk(struct btrfs_root *root,
			       struct btrfs_path *path,
			       struct btrfs_key *key,
			       struct btrfs_ioctl_search_key *sk,
			       size_t *buf_size,
			       char __user *ubuf,
			       unsigned long *sk_offset,
			       int *num_found)
{
	u64 found_transid;
	struct extent_buffer *leaf;
	struct btrfs_ioctl_search_header sh;
	struct btrfs_key test;
	unsigned long item_off;
	unsigned long item_len;
	int nritems;
	int i;
	int slot;
	int ret = 0;

	leaf = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(leaf);

	if (btrfs_header_generation(leaf) > sk->max_transid) {
		i = nritems;
		goto advance_key;
	}
	found_transid = btrfs_header_generation(leaf);

	for (i = slot; i < nritems; i++) {
		item_off = btrfs_item_ptr_offset(leaf, i);
		item_len = btrfs_item_size_nr(leaf, i);

		btrfs_item_key_to_cpu(leaf, key, i);
		if (!key_in_sk(key, sk))
			continue;

		if (sizeof(sh) + item_len > *buf_size) {
			if (*num_found) {
				ret = 1;
				goto out;
			}

			/*
			 * return one empty item back for v1, which does not
			 * handle -EOVERFLOW
			 */
			*buf_size = sizeof(sh) + item_len;
			item_len = 0;
			ret = -EOVERFLOW;
		}

		if (sizeof(sh) + item_len + *sk_offset > *buf_size) {
			ret = 1;
			goto out;
		}

		sh.objectid = key->objectid;
		sh.offset = key->offset;
		sh.type = key->type;
		sh.len = item_len;
		sh.transid = found_transid;

		/* copy search result header */
		if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) {
			ret = -EFAULT;
			goto out;
		}

		*sk_offset += sizeof(sh);

		if (item_len) {
			char __user *up = ubuf + *sk_offset;
			/* copy the item */
			if (read_extent_buffer_to_user(leaf, up,
						       item_off, item_len)) {
				ret = -EFAULT;
				goto out;
			}

			*sk_offset += item_len;
		}
		(*num_found)++;

		if (ret) /* -EOVERFLOW from above */
			goto out;

		if (*num_found >= sk->nr_items) {
			ret = 1;
			goto out;
		}
	}
advance_key:
	ret = 0;
	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;
	if (btrfs_comp_cpu_keys(key, &test) >= 0)
		ret = 1;
	else if (key->offset < (u64)-1)
		key->offset++;
	else if (key->type < (u8)-1) {
		key->offset = 0;
		key->type++;
	} else if (key->objectid < (u64)-1) {
		key->offset = 0;
		key->type = 0;
		key->objectid++;
	} else
		ret = 1;
out:
	/*
	 *  0: all items from this leaf copied, continue with next
	 *  1: * more items can be copied, but unused buffer is too small
	 *     * all items were found
	 *     Either way, it stops the loop that iterates to the next
	 *     leaf
	 *  -EOVERFLOW: item was too large for the buffer
	 *  -EFAULT: could not copy extent buffer back to userspace
	 */
	return ret;
}
static noinline int search_ioctl(struct inode *inode,
				 struct btrfs_ioctl_search_key *sk,
				 size_t *buf_size,
				 char __user *ubuf)
{
	struct btrfs_root *root;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_fs_info *info = BTRFS_I(inode)->root->fs_info;
	int ret;
	int num_found = 0;
	unsigned long sk_offset = 0;

	if (*buf_size < sizeof(struct btrfs_ioctl_search_header)) {
		*buf_size = sizeof(struct btrfs_ioctl_search_header);
		return -EOVERFLOW;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (sk->tree_id == 0) {
		/* search the root of the inode that was passed */
		root = BTRFS_I(inode)->root;
	} else {
		key.objectid = sk->tree_id;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;
		root = btrfs_read_fs_root_no_name(info, &key);
		if (IS_ERR(root)) {
			btrfs_err(info, "could not find root %llu",
				  sk->tree_id);
			btrfs_free_path(path);
			return -ENOENT;
		}
	}

	key.objectid = sk->min_objectid;
	key.type = sk->min_type;
	key.offset = sk->min_offset;

	while (1) {
		ret = btrfs_search_forward(root, &key, path, sk->min_transid);
		if (ret != 0) {
			if (ret > 0)
				ret = 0;
			goto err;
		}
		ret = copy_to_sk(root, path, &key, sk, buf_size, ubuf,
				 &sk_offset, &num_found);
		btrfs_release_path(path);
		if (ret)
			break;
	}
	if (ret > 0)
		ret = 0;
err:
	sk->nr_items = num_found;
	btrfs_free_path(path);
	return ret;
}
static noinline int btrfs_ioctl_tree_search(struct file *file,
					    void __user *argp)
{
	struct btrfs_ioctl_search_args __user *uargs;
	struct btrfs_ioctl_search_key sk;
	struct inode *inode;
	int ret;
	size_t buf_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	uargs = (struct btrfs_ioctl_search_args __user *)argp;

	if (copy_from_user(&sk, &uargs->key, sizeof(sk)))
		return -EFAULT;

	buf_size = sizeof(uargs->buf);

	inode = file_inode(file);
	ret = search_ioctl(inode, &sk, &buf_size, uargs->buf);

	/*
	 * In the original implementation an overflow is handled by returning a
	 * search header with a len of zero, so reset ret.
	 */
	if (ret == -EOVERFLOW)
		ret = 0;

	if (ret == 0 && copy_to_user(&uargs->key, &sk, sizeof(sk)))
		ret = -EFAULT;
	return ret;
}
static noinline int btrfs_ioctl_tree_search_v2(struct file *file,
					       void __user *argp)
{
	struct btrfs_ioctl_search_args_v2 __user *uarg;
	struct btrfs_ioctl_search_args_v2 args;
	struct inode *inode;
	int ret;
	size_t buf_size;
	const size_t buf_limit = 16 * 1024 * 1024;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* copy search header and buffer size */
	uarg = (struct btrfs_ioctl_search_args_v2 __user *)argp;
	if (copy_from_user(&args, uarg, sizeof(args)))
		return -EFAULT;

	buf_size = args.buf_size;

	if (buf_size < sizeof(struct btrfs_ioctl_search_header))
		return -EOVERFLOW;

	/* limit result size to 16MB */
	if (buf_size > buf_limit)
		buf_size = buf_limit;

	inode = file_inode(file);
	ret = search_ioctl(inode, &args.key, &buf_size,
			   (char *)(&uarg->buf[0]));
	if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key)))
		ret = -EFAULT;
	else if (ret == -EOVERFLOW &&
		 copy_to_user(&uarg->buf_size, &buf_size, sizeof(buf_size)))
		ret = -EFAULT;

	return ret;
}
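
/*
 * Illustrative userspace sketch (not part of this file): dumping all
 * ROOT_ITEMs from the root tree with the v2 search ioctl; the buffer
 * size is chosen arbitrarily and error handling is omitted.
 *
 *	struct btrfs_ioctl_search_args_v2 *args;
 *	args = calloc(1, sizeof(*args) + 65536);
 *	args->buf_size = 65536;
 *	args->key.tree_id = BTRFS_ROOT_TREE_OBJECTID;
 *	args->key.min_type = args->key.max_type = BTRFS_ROOT_ITEM_KEY;
 *	args->key.max_objectid = (__u64)-1;
 *	args->key.max_offset = (__u64)-1;
 *	args->key.max_transid = (__u64)-1;
 *	args->key.nr_items = 4096;
 *	ioctl(fd, BTRFS_IOC_TREE_SEARCH_V2, args);
 */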
/*
 * Search INODE_REFs to identify the path name of the 'dirid' directory
 * in a 'tree_id' tree, and set the path name in 'name'.
 */
static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
				u64 tree_id, u64 dirid, char *name)
{
	struct btrfs_root *root;
	struct btrfs_key key;
	char *ptr;
	int ret = -1;
	int slot;
	int len;
	int total_len = 0;
	struct btrfs_inode_ref *iref;
	struct extent_buffer *l;
	struct btrfs_path *path;

	if (dirid == BTRFS_FIRST_FREE_OBJECTID) {
		name[0] = '\0';
		return 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX];

	key.objectid = tree_id;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	root = btrfs_read_fs_root_no_name(info, &key);
	if (IS_ERR(root)) {
		btrfs_err(info, "could not find root %llu", tree_id);
		ret = -ENOENT;
		goto out;
	}

	key.objectid = dirid;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;
		else if (ret > 0) {
			ret = btrfs_previous_item(root, path, dirid,
						  BTRFS_INODE_REF_KEY);
			if (ret < 0)
				goto out;
			else if (ret > 0) {
				ret = -ENOENT;
				goto out;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, slot);

		iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(l, iref);
		ptr -= len + 1;
		total_len += len + 1;
		if (ptr < name) {
			ret = -ENAMETOOLONG;
			goto out;
		}

		*(ptr + len) = '/';
		read_extent_buffer(l, ptr, (unsigned long)(iref + 1), len);

		if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
			break;

		btrfs_release_path(path);
		key.objectid = key.offset;
		key.offset = (u64)-1;
		dirid = key.objectid;
	}
	memmove(name, ptr, total_len);
	name[total_len] = '\0';
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
static noinline int btrfs_ioctl_ino_lookup(struct file *file,
					   void __user *argp)
{
	struct btrfs_ioctl_ino_lookup_args *args;
	struct inode *inode;
	int ret = 0;

	args = memdup_user(argp, sizeof(*args));
	if (IS_ERR(args))
		return PTR_ERR(args);

	inode = file_inode(file);

	/*
	 * Unprivileged query to obtain the containing subvolume root id. The
	 * path is reset so it's consistent with btrfs_search_path_in_tree.
	 */
	if (args->treeid == 0)
		args->treeid = BTRFS_I(inode)->root->root_key.objectid;

	if (args->objectid == BTRFS_FIRST_FREE_OBJECTID) {
		args->name[0] = 0;
		goto out;
	}

	if (!capable(CAP_SYS_ADMIN)) {
		ret = -EPERM;
		goto out;
	}

	ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info,
					args->treeid, args->objectid,
					args->name);

out:
	if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
		ret = -EFAULT;

	kfree(args);
	return ret;
}
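
/*
 * Illustrative userspace sketch (not part of this file): resolving the
 * subvolume-relative path of an inode number; `ino` is hypothetical.
 *
 *	struct btrfs_ioctl_ino_lookup_args args = { 0 };
 *	args.treeid = 0;	(0 means: the subvolume containing fd)
 *	args.objectid = ino;
 *	if (ioctl(fd, BTRFS_IOC_INO_LOOKUP, &args) == 0)
 *		printf("%s\n", args.name);
 */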
static noinline int btrfs_ioctl_snap_destroy(struct file *file,
					     void __user *arg)
{
	struct dentry *parent = file->f_path.dentry;
	struct dentry *dentry;
	struct inode *dir = d_inode(parent);
	struct inode *inode;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *dest = NULL;
	struct btrfs_ioctl_vol_args *vol_args;
	struct btrfs_trans_handle *trans;
	struct btrfs_block_rsv block_rsv;
	u64 root_flags;
	u64 qgroup_reserved;
	int namelen;
	int ret;
	int err = 0;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);

	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
	namelen = strlen(vol_args->name);
	if (strchr(vol_args->name, '/') ||
	    strncmp(vol_args->name, "..", namelen) == 0) {
		err = -EINVAL;
		goto out;
	}

	err = mnt_want_write_file(file);
	if (err)
		goto out;

	err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
	if (err == -EINTR)
		goto out_drop_write;
	dentry = lookup_one_len(vol_args->name, parent, namelen);
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock_dir;
	}

	if (d_really_is_negative(dentry)) {
		err = -ENOENT;
		goto out_dput;
	}

	inode = d_inode(dentry);
	dest = BTRFS_I(inode)->root;
	if (!capable(CAP_SYS_ADMIN)) {
		/*
		 * Regular user.  Only allow this with a special mount
		 * option, when the user has write+exec access to the
		 * subvol root, and when rmdir(2) would have been
		 * allowed.
		 *
		 * Note that this is _not_ a check that the subvol is
		 * empty or doesn't contain data that we wouldn't
		 * otherwise be able to delete.
		 *
		 * Users who want to delete empty subvols should try
		 * rmdir(2).
		 */
		err = -EPERM;
		if (!btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED))
			goto out_dput;

		/*
		 * Do not allow deletion if the parent dir is the same
		 * as the dir to be deleted.  That means the ioctl
		 * must be called on the dentry referencing the root
		 * of the subvol, not a random directory contained
		 * inside it.
		 */
		err = -EINVAL;
		if (root == dest)
			goto out_dput;

		err = inode_permission(inode, MAY_WRITE | MAY_EXEC);
		if (err)
			goto out_dput;
	}

	/* check if subvolume may be deleted by a user */
	err = btrfs_may_delete(dir, dentry, 1);
	if (err)
		goto out_dput;

	if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
		err = -EINVAL;
		goto out_dput;
	}

	mutex_lock(&inode->i_mutex);

	/*
	 * Don't allow to delete a subvolume with send in progress. This is
	 * inside the i_mutex so the error handling that has to drop the bit
	 * again is not run concurrently.
	 */
	spin_lock(&dest->root_item_lock);
	root_flags = btrfs_root_flags(&dest->root_item);
	if (dest->send_in_progress == 0) {
		btrfs_set_root_flags(&dest->root_item,
				root_flags | BTRFS_ROOT_SUBVOL_DEAD);
		spin_unlock(&dest->root_item_lock);
	} else {
		spin_unlock(&dest->root_item_lock);
		btrfs_warn(root->fs_info,
			"Attempt to delete subvolume %llu during send",
			dest->root_key.objectid);
		err = -EPERM;
		goto out_unlock_inode;
	}

	down_write(&root->fs_info->subvol_sem);

	err = may_destroy_subvol(dest);
	if (err)
		goto out_up_write;

	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
	/*
	 * One for dir inode, two for dir entries, two for root
	 * ref/backref.
	 */
	err = btrfs_subvolume_reserve_metadata(root, &block_rsv,
					       5, &qgroup_reserved, true);
	if (err)
		goto out_up_write;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_release;
	}
	trans->block_rsv = &block_rsv;
	trans->bytes_reserved = block_rsv.size;

	ret = btrfs_unlink_subvol(trans, root, dir,
				  dest->root_key.objectid,
				  dentry->d_name.name,
				  dentry->d_name.len);
	if (ret) {
		err = ret;
		btrfs_abort_transaction(trans, root, ret);
		goto out_end_trans;
	}

	btrfs_record_root_in_trans(trans, dest);

	memset(&dest->root_item.drop_progress, 0,
	       sizeof(dest->root_item.drop_progress));
	dest->root_item.drop_level = 0;
	btrfs_set_root_refs(&dest->root_item, 0);

	if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
		ret = btrfs_insert_orphan_item(trans,
					root->fs_info->tree_root,
					dest->root_key.objectid);
		if (ret) {
			err = ret;
			btrfs_abort_transaction(trans, root, ret);
			goto out_end_trans;
		}
	}

	ret = btrfs_uuid_tree_rem(trans, root->fs_info->uuid_root,
				  dest->root_item.uuid, BTRFS_UUID_KEY_SUBVOL,
				  dest->root_key.objectid);
	if (ret && ret != -ENOENT) {
		err = ret;
		btrfs_abort_transaction(trans, root, ret);
		goto out_end_trans;
	}
	if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
		ret = btrfs_uuid_tree_rem(trans, root->fs_info->uuid_root,
					  dest->root_item.received_uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  dest->root_key.objectid);
		if (ret && ret != -ENOENT) {
			err = ret;
			btrfs_abort_transaction(trans, root, ret);
			goto out_end_trans;
		}
	}

out_end_trans:
	trans->block_rsv = NULL;
	trans->bytes_reserved = 0;
	ret = btrfs_end_transaction(trans, root);
	if (ret && !err)
		err = ret;
	inode->i_flags |= S_DEAD;
out_release:
	btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);
out_up_write:
	up_write(&root->fs_info->subvol_sem);
	if (err) {
		spin_lock(&dest->root_item_lock);
		root_flags = btrfs_root_flags(&dest->root_item);
		btrfs_set_root_flags(&dest->root_item,
				root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
		spin_unlock(&dest->root_item_lock);
	}
out_unlock_inode:
	mutex_unlock(&inode->i_mutex);
	if (!err) {
		d_invalidate(dentry);
		btrfs_invalidate_inodes(dest);
		d_delete(dentry);
		ASSERT(dest->send_in_progress == 0);

		/* the last ref */
		if (dest->ino_cache_inode) {
			iput(dest->ino_cache_inode);
			dest->ino_cache_inode = NULL;
		}
	}
out_dput:
	dput(dentry);
out_unlock_dir:
	mutex_unlock(&dir->i_mutex);
out_drop_write:
	mnt_drop_write_file(file);
out:
	kfree(vol_args);
	return err;
}
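
/*
 * Illustrative userspace sketch (not part of this file): deleting a
 * subvolume by name, as `btrfs subvolume delete` does; the fd must
 * refer to the parent directory of the subvolume.
 *
 *	struct btrfs_ioctl_vol_args args = { 0 };
 *	strncpy(args.name, "snap1", BTRFS_PATH_NAME_MAX);
 *	ioctl(parent_dir_fd, BTRFS_IOC_SNAP_DESTROY, &args);
 */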
static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ioctl_defrag_range_args *range;
	int ret;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	if (btrfs_root_readonly(root)) {
		ret = -EROFS;
		goto out;
	}

	switch (inode->i_mode & S_IFMT) {
	case S_IFDIR:
		if (!capable(CAP_SYS_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		ret = btrfs_defrag_root(root);
		if (ret)
			goto out;
		ret = btrfs_defrag_root(root->fs_info->extent_root);
		break;
	case S_IFREG:
		if (!(file->f_mode & FMODE_WRITE)) {
			ret = -EINVAL;
			goto out;
		}

		range = kzalloc(sizeof(*range), GFP_KERNEL);
		if (!range) {
			ret = -ENOMEM;
			goto out;
		}

		if (argp) {
			if (copy_from_user(range, argp,
					   sizeof(*range))) {
				ret = -EFAULT;
				kfree(range);
				goto out;
			}
			/* compression requires us to start the IO */
			if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
				range->flags |= BTRFS_DEFRAG_RANGE_START_IO;
				range->extent_thresh = (u32)-1;
			}
		} else {
			/* the rest are all set to zero by kzalloc */
			range->len = (u64)-1;
		}
		ret = btrfs_defrag_file(file_inode(file), file,
					range, 0, 0);
		if (ret > 0)
			ret = 0;
		kfree(range);
		break;
	default:
		ret = -EINVAL;
	}
out:
	mnt_drop_write_file(file);
	return ret;
}
static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
{
	struct btrfs_ioctl_vol_args *vol_args;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
			1)) {
		return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
	}

	mutex_lock(&root->fs_info->volume_mutex);
	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
		goto out;
	}

	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
	ret = btrfs_init_new_device(root, vol_args->name);

	if (!ret)
		btrfs_info(root->fs_info, "disk added %s", vol_args->name);

	kfree(vol_args);
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
	return ret;
}
static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
{
	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
	struct btrfs_ioctl_vol_args *vol_args;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
		goto err_drop;
	}

	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';

	if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
			1)) {
		ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
		goto out;
	}

	mutex_lock(&root->fs_info->volume_mutex);
	ret = btrfs_rm_device(root, vol_args->name);
	mutex_unlock(&root->fs_info->volume_mutex);
	atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);

	if (!ret)
		btrfs_info(root->fs_info, "disk deleted %s", vol_args->name);

out:
	kfree(vol_args);
err_drop:
	mnt_drop_write_file(file);
	return ret;
}
2702 static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
2704 struct btrfs_ioctl_fs_info_args *fi_args;
2705 struct btrfs_device *device;
2706 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2709 fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL);
2713 mutex_lock(&fs_devices->device_list_mutex);
2714 fi_args->num_devices = fs_devices->num_devices;
2715 memcpy(&fi_args->fsid, root->fs_info->fsid, sizeof(fi_args->fsid));
2717 list_for_each_entry(device, &fs_devices->devices, dev_list) {
2718 if (device->devid > fi_args->max_id)
2719 fi_args->max_id = device->devid;
2721 mutex_unlock(&fs_devices->device_list_mutex);
2723 fi_args->nodesize = root->fs_info->super_copy->nodesize;
2724 fi_args->sectorsize = root->fs_info->super_copy->sectorsize;
2725 fi_args->clone_alignment = root->fs_info->super_copy->sectorsize;
2727 if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
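/*
 * Illustrative userspace sketch (not part of this file): reading the
 * filesystem-wide info assembled above; assumes <linux/btrfs.h> and 'fd'
 * open anywhere inside the mount:
 *
 *	struct btrfs_ioctl_fs_info_args fi;
 *
 *	if (ioctl(fd, BTRFS_IOC_FS_INFO, &fi) == 0)
 *		printf("%llu devices, highest devid %llu\n",
 *		       fi.num_devices, fi.max_id);
 */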
2734 static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
2736 struct btrfs_ioctl_dev_info_args *di_args;
2737 struct btrfs_device *dev;
2738 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2740 char *s_uuid = NULL;
2742 di_args = memdup_user(arg, sizeof(*di_args));
2743 if (IS_ERR(di_args))
2744 return PTR_ERR(di_args);
2746 if (!btrfs_is_empty_uuid(di_args->uuid))
2747 s_uuid = di_args->uuid;
2749 mutex_lock(&fs_devices->device_list_mutex);
2750 dev = btrfs_find_device(root->fs_info, di_args->devid, s_uuid, NULL);
2757 di_args->devid = dev->devid;
2758 di_args->bytes_used = btrfs_device_get_bytes_used(dev);
2759 di_args->total_bytes = btrfs_device_get_total_bytes(dev);
2760 memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
2762 struct rcu_string *name;
2765 name = rcu_dereference(dev->name);
2766 strncpy(di_args->path, name->str, sizeof(di_args->path));
2768 di_args->path[sizeof(di_args->path) - 1] = 0;
2770 di_args->path[0] = '\0';
2774 mutex_unlock(&fs_devices->device_list_mutex);
2775 if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
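/*
 * Illustrative userspace sketch (not part of this file): device ids can be
 * sparse, so callers usually take max_id from BTRFS_IOC_FS_INFO and probe
 * each candidate id, ignoring the ones that fail; 'devid' below is a
 * hypothetical candidate id:
 *
 *	struct btrfs_ioctl_dev_info_args di;
 *
 *	memset(&di, 0, sizeof(di));
 *	di.devid = devid;
 *	if (ioctl(fd, BTRFS_IOC_DEV_INFO, &di) == 0)
 *		printf("devid %llu: %s\n", di.devid, di.path);
 */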
2782 static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
2786 page = grab_cache_page(inode->i_mapping, index);
2788 return ERR_PTR(-ENOMEM);
2790 if (!PageUptodate(page)) {
2793 ret = btrfs_readpage(NULL, page);
2795 return ERR_PTR(ret);
2797 if (!PageUptodate(page)) {
2799 page_cache_release(page);
2800 return ERR_PTR(-EIO);
2802 if (page->mapping != inode->i_mapping) {
2804 page_cache_release(page);
2805 return ERR_PTR(-EAGAIN);
2812 static int gather_extent_pages(struct inode *inode, struct page **pages,
2813 int num_pages, u64 off)
2816 pgoff_t index = off >> PAGE_CACHE_SHIFT;
2818 for (i = 0; i < num_pages; i++) {
2820 pages[i] = extent_same_get_page(inode, index + i);
2821 if (IS_ERR(pages[i])) {
2822 int err = PTR_ERR(pages[i]);
2833 static int lock_extent_range(struct inode *inode, u64 off, u64 len,
2834 bool retry_range_locking)
2837 * Do any pending delalloc/csum calculations on inode, one way or
2838 * another, and lock file content.
2839 * The locking order is:
2841 * 1) pages
2842 * 2) range in the inode's io tree
2845 struct btrfs_ordered_extent *ordered;
2846 lock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
2847 ordered = btrfs_lookup_first_ordered_extent(inode,
2850 ordered->file_offset + ordered->len <= off ||
2851 ordered->file_offset >= off + len) &&
2852 !test_range_bit(&BTRFS_I(inode)->io_tree, off,
2853 off + len - 1, EXTENT_DELALLOC, 0, NULL)) {
2855 btrfs_put_ordered_extent(ordered);
2858 unlock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
2860 btrfs_put_ordered_extent(ordered);
2861 if (!retry_range_locking)
2863 btrfs_wait_ordered_range(inode, off, len);
2868 static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
2870 mutex_unlock(&inode1->i_mutex);
2871 mutex_unlock(&inode2->i_mutex);
2874 static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2)
2876 if (inode1 < inode2)
2877 swap(inode1, inode2);
2879 mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
2880 mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
2883 static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
2884 struct inode *inode2, u64 loff2, u64 len)
2886 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
2887 unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
2890 static int btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
2891 struct inode *inode2, u64 loff2, u64 len,
2892 bool retry_range_locking)
2896 if (inode1 < inode2) {
2897 swap(inode1, inode2);
2900 ret = lock_extent_range(inode1, loff1, len, retry_range_locking);
2903 ret = lock_extent_range(inode2, loff2, len, retry_range_locking);
2905 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1,
2912 struct page **src_pages;
2913 struct page **dst_pages;
2916 static void btrfs_cmp_data_free(struct cmp_pages *cmp)
2921 for (i = 0; i < cmp->num_pages; i++) {
2922 pg = cmp->src_pages[i];
2925 page_cache_release(pg);
2927 pg = cmp->dst_pages[i];
2930 page_cache_release(pg);
2933 kfree(cmp->src_pages);
2934 kfree(cmp->dst_pages);
2937 static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
2938 struct inode *dst, u64 dst_loff,
2939 u64 len, struct cmp_pages *cmp)
2942 int num_pages = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
2943 struct page **src_pgarr, **dst_pgarr;
2946 * We must gather up all the pages before we initiate our
2947 * extent locking. We use an array for the page pointers. Size
2948 * of the array is bounded by len, which is in turn bounded by
2949 * BTRFS_MAX_DEDUPE_LEN.
2951 src_pgarr = kzalloc(num_pages * sizeof(struct page *), GFP_NOFS);
2952 dst_pgarr = kzalloc(num_pages * sizeof(struct page *), GFP_NOFS);
2953 if (!src_pgarr || !dst_pgarr) {
2958 cmp->num_pages = num_pages;
2959 cmp->src_pages = src_pgarr;
2960 cmp->dst_pages = dst_pgarr;
2962 ret = gather_extent_pages(src, cmp->src_pages, cmp->num_pages, loff);
2966 ret = gather_extent_pages(dst, cmp->dst_pages, cmp->num_pages, dst_loff);
2970 btrfs_cmp_data_free(cmp);
2974 static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
2975 u64 dst_loff, u64 len, struct cmp_pages *cmp)
2979 struct page *src_page, *dst_page;
2980 unsigned int cmp_len = PAGE_CACHE_SIZE;
2981 void *addr, *dst_addr;
2985 if (len < PAGE_CACHE_SIZE)
2988 BUG_ON(i >= cmp->num_pages);
2990 src_page = cmp->src_pages[i];
2991 dst_page = cmp->dst_pages[i];
2992 ASSERT(PageLocked(src_page));
2993 ASSERT(PageLocked(dst_page));
2995 addr = kmap_atomic(src_page);
2996 dst_addr = kmap_atomic(dst_page);
2998 flush_dcache_page(src_page);
2999 flush_dcache_page(dst_page);
3001 if (memcmp(addr, dst_addr, cmp_len))
3002 ret = BTRFS_SAME_DATA_DIFFERS;
3004 kunmap_atomic(addr);
3005 kunmap_atomic(dst_addr);
3017 static int extent_same_check_offsets(struct inode *inode, u64 off, u64 *plen,
3021 u64 bs = BTRFS_I(inode)->root->fs_info->sb->s_blocksize;
3023 if (off + olen > inode->i_size || off + olen < off)
3026 /* if we extend to eof, continue to block boundary */
3027 if (off + len == inode->i_size)
3028 *plen = len = ALIGN(inode->i_size, bs) - off;
3030 /* Check that we are block aligned - btrfs_clone() requires this */
3031 if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs))
3037 static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
3038 struct inode *dst, u64 dst_loff)
3042 struct cmp_pages cmp;
3044 u64 same_lock_start = 0;
3045 u64 same_lock_len = 0;
3054 mutex_lock(&src->i_mutex);
3056 ret = extent_same_check_offsets(src, loff, &len, olen);
3061 * Single inode case wants the same checks, except we
3062 * don't want our length pushed out past i_size as
3063 * comparing that data range makes no sense.
3065 * extent_same_check_offsets() will do this for an
3066 * unaligned length at i_size, so catch it here and
3067 * reject the request.
3069 * This effectively means we require aligned extents
3070 * for the single-inode case, whereas the other cases
3071 * allow an unaligned length so long as it ends at i_size.
3079 /* Check for overlapping ranges */
3080 if (dst_loff + len > loff && dst_loff < loff + len) {
3085 same_lock_start = min_t(u64, loff, dst_loff);
3086 same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
3088 btrfs_double_inode_lock(src, dst);
3090 ret = extent_same_check_offsets(src, loff, &len, olen);
3094 ret = extent_same_check_offsets(dst, dst_loff, &len, olen);
3099 /* don't make the dst file partly checksummed */
3100 if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
3101 (BTRFS_I(dst)->flags & BTRFS_INODE_NODATASUM)) {
3107 ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, &cmp);
3112 ret = lock_extent_range(src, same_lock_start, same_lock_len,
3115 ret = btrfs_double_extent_lock(src, loff, dst, dst_loff, len,
3118 * If one of the inodes has dirty pages in the respective range or
3119 * ordered extents, we need to flush delalloc and wait for all ordered
3120 * extents in the range. We must unlock the pages and the ranges in the
3121 * io trees to avoid deadlocks when flushing delalloc (requires locking
3122 * pages) and when waiting for ordered extents to complete (they require
3123 * page locking).
3125 if (ret == -EAGAIN) {
3127 * Ranges in the io trees already unlocked. Now unlock all
3128 * pages before waiting for all IO to complete.
3130 btrfs_cmp_data_free(&cmp);
3132 btrfs_wait_ordered_range(src, same_lock_start,
3135 btrfs_wait_ordered_range(src, loff, len);
3136 btrfs_wait_ordered_range(dst, dst_loff, len);
3142 /* ranges in the io trees already unlocked */
3143 btrfs_cmp_data_free(&cmp);
3147 /* pass original length for comparison so we stay within i_size */
3148 ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen, &cmp);
3150 ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1);
3153 unlock_extent(&BTRFS_I(src)->io_tree, same_lock_start,
3154 same_lock_start + same_lock_len - 1);
3156 btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
3158 btrfs_cmp_data_free(&cmp);
3161 mutex_unlock(&src->i_mutex);
3163 btrfs_double_inode_unlock(src, dst);
3168 #define BTRFS_MAX_DEDUPE_LEN (16 * 1024 * 1024)
3170 static long btrfs_ioctl_file_extent_same(struct file *file,
3171 struct btrfs_ioctl_same_args __user *argp)
3173 struct btrfs_ioctl_same_args *same = NULL;
3174 struct btrfs_ioctl_same_extent_info *info;
3175 struct inode *src = file_inode(file);
3181 u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
3182 bool is_admin = capable(CAP_SYS_ADMIN);
3185 if (!(file->f_mode & FMODE_READ))
3188 ret = mnt_want_write_file(file);
3192 if (get_user(count, &argp->dest_count)) {
3197 size = offsetof(struct btrfs_ioctl_same_args __user, info[count]);
3199 same = memdup_user(argp, size);
3202 ret = PTR_ERR(same);
3207 off = same->logical_offset;
3211 * Limit the total length we will dedupe for each operation.
3212 * This is intended to bound the total time spent in this
3213 * ioctl to something sane.
3215 if (len > BTRFS_MAX_DEDUPE_LEN)
3216 len = BTRFS_MAX_DEDUPE_LEN;
3218 if (WARN_ON_ONCE(bs < PAGE_CACHE_SIZE)) {
3220 * Btrfs does not support blocksize < page_size. As a
3221 * result, btrfs_cmp_data() won't correctly handle
3222 * this situation without an update.
3229 if (S_ISDIR(src->i_mode))
3233 if (!S_ISREG(src->i_mode))
3236 /* pre-format output fields to sane values */
3237 for (i = 0; i < count; i++) {
3238 same->info[i].bytes_deduped = 0ULL;
3239 same->info[i].status = 0;
3242 for (i = 0, info = same->info; i < count; i++, info++) {
3244 struct fd dst_file = fdget(info->fd);
3245 if (!dst_file.file) {
3246 info->status = -EBADF;
3249 dst = file_inode(dst_file.file);
3251 if (!(is_admin || (dst_file.file->f_mode & FMODE_WRITE))) {
3252 info->status = -EINVAL;
3253 } else if (file->f_path.mnt != dst_file.file->f_path.mnt) {
3254 info->status = -EXDEV;
3255 } else if (S_ISDIR(dst->i_mode)) {
3256 info->status = -EISDIR;
3257 } else if (!S_ISREG(dst->i_mode)) {
3258 info->status = -EACCES;
3260 info->status = btrfs_extent_same(src, off, len, dst,
3261 info->logical_offset);
3262 if (info->status == 0)
3263 info->bytes_deduped += len;
3268 ret = copy_to_user(argp, same, size);
3273 mnt_drop_write_file(file);
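/*
 * Illustrative userspace sketch (not part of this file) of driving the
 * dedupe ioctl handled above with a single destination; assumes
 * <linux/btrfs.h>, src_fd/dst_fd open on the same mount, and a
 * block-aligned length:
 *
 *	struct btrfs_ioctl_same_args *same;
 *
 *	same = calloc(1, sizeof(*same) + sizeof(same->info[0]));
 *	same->logical_offset = 0;
 *	same->length = 128 * 1024;
 *	same->dest_count = 1;
 *	same->info[0].fd = dst_fd;
 *	same->info[0].logical_offset = 0;
 *	if (ioctl(src_fd, BTRFS_IOC_FILE_EXTENT_SAME, same) == 0 &&
 *	    same->info[0].status == BTRFS_SAME_DATA_DIFFERS)
 *		fprintf(stderr, "contents differ, nothing deduped\n");
 *	free(same);
 */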
3278 static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
3279 struct inode *inode,
3285 struct btrfs_root *root = BTRFS_I(inode)->root;
3288 inode_inc_iversion(inode);
3289 if (!no_time_update)
3290 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
3292 * We round up to the block size at eof when determining which
3293 * extents to clone above, but shouldn't round up the file size.
3295 if (endoff > destoff + olen)
3296 endoff = destoff + olen;
3297 if (endoff > inode->i_size)
3298 btrfs_i_size_write(inode, endoff);
3300 ret = btrfs_update_inode(trans, root, inode);
3302 btrfs_abort_transaction(trans, root, ret);
3303 btrfs_end_transaction(trans, root);
3306 ret = btrfs_end_transaction(trans, root);
3311 static void clone_update_extent_map(struct inode *inode,
3312 const struct btrfs_trans_handle *trans,
3313 const struct btrfs_path *path,
3314 const u64 hole_offset,
3317 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
3318 struct extent_map *em;
3321 em = alloc_extent_map();
3323 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3324 &BTRFS_I(inode)->runtime_flags);
3329 struct btrfs_file_extent_item *fi;
3331 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
3332 struct btrfs_file_extent_item);
3333 btrfs_extent_item_to_extent_map(inode, path, fi, false, em);
3334 em->generation = -1;
3335 if (btrfs_file_extent_type(path->nodes[0], fi) ==
3336 BTRFS_FILE_EXTENT_INLINE)
3337 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3338 &BTRFS_I(inode)->runtime_flags);
3340 em->start = hole_offset;
3342 em->ram_bytes = em->len;
3343 em->orig_start = hole_offset;
3344 em->block_start = EXTENT_MAP_HOLE;
3346 em->orig_block_len = 0;
3347 em->compress_type = BTRFS_COMPRESS_NONE;
3348 em->generation = trans->transid;
3352 write_lock(&em_tree->lock);
3353 ret = add_extent_mapping(em_tree, em, 1);
3354 write_unlock(&em_tree->lock);
3355 if (ret != -EEXIST) {
3356 free_extent_map(em);
3359 btrfs_drop_extent_cache(inode, em->start,
3360 em->start + em->len - 1, 0);
3364 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3365 &BTRFS_I(inode)->runtime_flags);
3369 * Make sure we do not end up inserting an inline extent into a file that has
3370 * already other (non-inline) extents. If a file has an inline extent it can
3371 * cannot have any other extents and the (single) inline extent must start at
3372 * file offset 0. Failing to respect these rules will lead to file corruption,
3373 * resulting in EIO errors on read/write operations, hitting BUG_ON's in mm, etc.
3375 * We can have extents that have been already written to disk or we can have
3376 * dirty ranges still in delalloc, in which case the extent maps and items are
3377 * created only when we run delalloc, and the delalloc ranges might fall outside
3378 * the range we are currently locking in the inode's io tree. So we check the
3379 * inode's i_size because of that (i_size updates are done while holding the
3380 * i_mutex, which we are holding here).
3381 * We also check to see if the inode has a size not greater than "datal" but has
3382 * extents beyond it, due to a fallocate with FALLOC_FL_KEEP_SIZE (and we are
3383 * protected against such concurrent fallocate calls by the i_mutex).
3385 * If the file has no extents but a size greater than datal, do not allow the
3386 * copy because we would need to turn the inline extent into a non-inline one (even
3387 * with NO_HOLES enabled). If we find our destination inode only has one inline
3388 * extent, just overwrite it with the source inline extent if its size is less
3389 * than the source extent's size, or we could copy the source inline extent's
3390 * data into the destination inode's inline extent if the latter is greater
3391 * than the former.
3393 static int clone_copy_inline_extent(struct inode *src,
3395 struct btrfs_trans_handle *trans,
3396 struct btrfs_path *path,
3397 struct btrfs_key *new_key,
3398 const u64 drop_start,
3404 struct btrfs_root *root = BTRFS_I(dst)->root;
3405 const u64 aligned_end = ALIGN(new_key->offset + datal,
3408 struct btrfs_key key;
3410 if (new_key->offset > 0)
3413 key.objectid = btrfs_ino(dst);
3414 key.type = BTRFS_EXTENT_DATA_KEY;
3416 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3419 } else if (ret > 0) {
3420 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
3421 ret = btrfs_next_leaf(root, path);
3425 goto copy_inline_extent;
3427 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3428 if (key.objectid == btrfs_ino(dst) &&
3429 key.type == BTRFS_EXTENT_DATA_KEY) {
3430 ASSERT(key.offset > 0);
3433 } else if (i_size_read(dst) <= datal) {
3434 struct btrfs_file_extent_item *ei;
3438 * If the file size is <= datal, make sure there are no other
3439 * extents following (can happen due to a fallocate call with
3440 * the flag FALLOC_FL_KEEP_SIZE).
3442 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3443 struct btrfs_file_extent_item);
3445 * If it's an inline extent, it cannot have other extents following it.
3448 if (btrfs_file_extent_type(path->nodes[0], ei) ==
3449 BTRFS_FILE_EXTENT_INLINE)
3450 goto copy_inline_extent;
3452 ext_len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
3453 if (ext_len > aligned_end)
3456 ret = btrfs_next_item(root, path);
3459 } else if (ret == 0) {
3460 btrfs_item_key_to_cpu(path->nodes[0], &key,
3462 if (key.objectid == btrfs_ino(dst) &&
3463 key.type == BTRFS_EXTENT_DATA_KEY)
3470 * We have no extent items, or we have an extent at offset 0 which may
3471 * or may not be inlined. All these cases are dealt with in the same way.
3473 if (i_size_read(dst) > datal) {
3475 * If the destination inode has an inline extent...
3476 * This would require copying the data from the source inline
3477 * extent into the beginning of the destination's inline extent.
3478 * But this is really complex: both extents can be compressed,
3479 * or just one of them, which would require decompressing and
3480 * re-compressing data (which could increase the new compressed
3481 * size, not allowing the compressed data to fit anymore in an
3482 * inline extent).
3483 * So just don't support this case for now (it should be rare,
3484 * we are not really saving space when cloning inline extents).
3489 btrfs_release_path(path);
3490 ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1);
3493 ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
3498 const u32 start = btrfs_file_extent_calc_inline_size(0);
3500 memmove(inline_data + start, inline_data + start + skip, datal);
3503 write_extent_buffer(path->nodes[0], inline_data,
3504 btrfs_item_ptr_offset(path->nodes[0],
3507 inode_add_bytes(dst, datal);
3513 * btrfs_clone() - clone a range from inode file to another
3515 * @src: Inode to clone from
3516 * @inode: Inode to clone to
3517 * @off: Offset within source to start clone from
3518 * @olen: Original length, passed by user, of range to clone
3519 * @olen_aligned: Block-aligned value of olen
3520 * @destoff: Offset within @inode to start clone
3521 * @no_time_update: Whether to update mtime/ctime on the target inode
3523 static int btrfs_clone(struct inode *src, struct inode *inode,
3524 const u64 off, const u64 olen, const u64 olen_aligned,
3525 const u64 destoff, int no_time_update)
3527 struct btrfs_root *root = BTRFS_I(inode)->root;
3528 struct btrfs_path *path = NULL;
3529 struct extent_buffer *leaf;
3530 struct btrfs_trans_handle *trans;
3532 struct btrfs_key key;
3536 const u64 len = olen_aligned;
3537 u64 last_dest_end = destoff;
3540 buf = vmalloc(root->nodesize);
3544 path = btrfs_alloc_path();
3552 key.objectid = btrfs_ino(src);
3553 key.type = BTRFS_EXTENT_DATA_KEY;
3557 u64 next_key_min_offset = key.offset + 1;
3560 * note the key will change type as we walk through the tree.
3563 path->leave_spinning = 1;
3564 ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
3569 * On the first search, if no extent item that starts at offset off was
3570 * found but the previous item is an extent item, it might overlap our
3571 * target range, therefore process it.
3573 if (key.offset == off && ret > 0 && path->slots[0] > 0) {
3574 btrfs_item_key_to_cpu(path->nodes[0], &key,
3575 path->slots[0] - 1);
3576 if (key.type == BTRFS_EXTENT_DATA_KEY)
3580 nritems = btrfs_header_nritems(path->nodes[0]);
3582 if (path->slots[0] >= nritems) {
3583 ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
3588 nritems = btrfs_header_nritems(path->nodes[0]);
3590 leaf = path->nodes[0];
3591 slot = path->slots[0];
3593 btrfs_item_key_to_cpu(leaf, &key, slot);
3594 if (key.type > BTRFS_EXTENT_DATA_KEY ||
3595 key.objectid != btrfs_ino(src))
3598 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3599 struct btrfs_file_extent_item *extent;
3602 struct btrfs_key new_key;
3603 u64 disko = 0, diskl = 0;
3604 u64 datao = 0, datal = 0;
3608 extent = btrfs_item_ptr(leaf, slot,
3609 struct btrfs_file_extent_item);
3610 comp = btrfs_file_extent_compression(leaf, extent);
3611 type = btrfs_file_extent_type(leaf, extent);
3612 if (type == BTRFS_FILE_EXTENT_REG ||
3613 type == BTRFS_FILE_EXTENT_PREALLOC) {
3614 disko = btrfs_file_extent_disk_bytenr(leaf,
3616 diskl = btrfs_file_extent_disk_num_bytes(leaf,
3618 datao = btrfs_file_extent_offset(leaf, extent);
3619 datal = btrfs_file_extent_num_bytes(leaf,
3621 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
3622 /* take upper bound, may be compressed */
3623 datal = btrfs_file_extent_ram_bytes(leaf,
3628 * The first search might have left us at an extent
3629 * item that ends before our target range's start, can
3630 * happen if we have holes and NO_HOLES feature enabled.
3632 if (key.offset + datal <= off) {
3635 } else if (key.offset >= off + len) {
3638 next_key_min_offset = key.offset + datal;
3639 size = btrfs_item_size_nr(leaf, slot);
3640 read_extent_buffer(leaf, buf,
3641 btrfs_item_ptr_offset(leaf, slot),
3644 btrfs_release_path(path);
3645 path->leave_spinning = 0;
3647 memcpy(&new_key, &key, sizeof(new_key));
3648 new_key.objectid = btrfs_ino(inode);
3649 if (off <= key.offset)
3650 new_key.offset = key.offset + destoff - off;
3652 new_key.offset = destoff;
3655 * Deal with a hole that doesn't have an extent item
3656 * that represents it (NO_HOLES feature enabled).
3657 * This hole is either in the middle of the cloning
3658 * range or at the beginning (fully overlaps it or
3659 * partially overlaps it).
3661 if (new_key.offset != last_dest_end)
3662 drop_start = last_dest_end;
3664 drop_start = new_key.offset;
3667 * 1 - adjusting old extent (we may have to split it)
3668 * 1 - add new extent
3669 * 1 - inode update
3671 trans = btrfs_start_transaction(root, 3);
3672 if (IS_ERR(trans)) {
3673 ret = PTR_ERR(trans);
3677 if (type == BTRFS_FILE_EXTENT_REG ||
3678 type == BTRFS_FILE_EXTENT_PREALLOC) {
3680 * a | --- range to clone ---| b
3681 * | ------------- extent ------------- |
3684 /* subtract range b */
3685 if (key.offset + datal > off + len)
3686 datal = off + len - key.offset;
3688 /* subtract range a */
3689 if (off > key.offset) {
3690 datao += off - key.offset;
3691 datal -= off - key.offset;
3694 ret = btrfs_drop_extents(trans, root, inode,
3696 new_key.offset + datal,
3699 if (ret != -EOPNOTSUPP)
3700 btrfs_abort_transaction(trans,
3702 btrfs_end_transaction(trans, root);
3706 ret = btrfs_insert_empty_item(trans, root, path,
3709 btrfs_abort_transaction(trans, root,
3711 btrfs_end_transaction(trans, root);
3715 leaf = path->nodes[0];
3716 slot = path->slots[0];
3717 write_extent_buffer(leaf, buf,
3718 btrfs_item_ptr_offset(leaf, slot),
3721 extent = btrfs_item_ptr(leaf, slot,
3722 struct btrfs_file_extent_item);
3724 /* disko == 0 means it's a hole */
3728 btrfs_set_file_extent_offset(leaf, extent,
3730 btrfs_set_file_extent_num_bytes(leaf, extent,
3734 inode_add_bytes(inode, datal);
3735 ret = btrfs_inc_extent_ref(trans, root,
3737 root->root_key.objectid,
3739 new_key.offset - datao);
3741 btrfs_abort_transaction(trans,
3744 btrfs_end_transaction(trans,
3750 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
3754 if (off > key.offset) {
3755 skip = off - key.offset;
3756 new_key.offset += skip;
3759 if (key.offset + datal > off + len)
3760 trim = key.offset + datal - (off + len);
3762 if (comp && (skip || trim)) {
3764 btrfs_end_transaction(trans, root);
3767 size -= skip + trim;
3768 datal -= skip + trim;
3770 ret = clone_copy_inline_extent(src, inode,
3777 if (ret != -EOPNOTSUPP)
3778 btrfs_abort_transaction(trans,
3781 btrfs_end_transaction(trans, root);
3784 leaf = path->nodes[0];
3785 slot = path->slots[0];
3788 /* If we have an implicit hole (NO_HOLES feature). */
3789 if (drop_start < new_key.offset)
3790 clone_update_extent_map(inode, trans,
3792 new_key.offset - drop_start);
3794 clone_update_extent_map(inode, trans, path, 0, 0);
3796 btrfs_mark_buffer_dirty(leaf);
3797 btrfs_release_path(path);
3799 last_dest_end = ALIGN(new_key.offset + datal,
3801 ret = clone_finish_inode_update(trans, inode,
3807 if (new_key.offset + datal >= destoff + len)
3810 btrfs_release_path(path);
3811 key.offset = next_key_min_offset;
3815 if (last_dest_end < destoff + len) {
3817 * We have an implicit hole (NO_HOLES feature is enabled) that
3818 * fully or partially overlaps our cloning range at its end.
3820 btrfs_release_path(path);
3823 * 1 - remove extent(s)
3824 * 1 - inode update
3826 trans = btrfs_start_transaction(root, 2);
3827 if (IS_ERR(trans)) {
3828 ret = PTR_ERR(trans);
3831 ret = btrfs_drop_extents(trans, root, inode,
3832 last_dest_end, destoff + len, 1);
3834 if (ret != -EOPNOTSUPP)
3835 btrfs_abort_transaction(trans, root, ret);
3836 btrfs_end_transaction(trans, root);
3839 clone_update_extent_map(inode, trans, NULL, last_dest_end,
3840 destoff + len - last_dest_end);
3841 ret = clone_finish_inode_update(trans, inode, destoff + len,
3842 destoff, olen, no_time_update);
3846 btrfs_free_path(path);
3851 static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
3852 u64 off, u64 olen, u64 destoff)
3854 struct inode *inode = file_inode(file);
3855 struct btrfs_root *root = BTRFS_I(inode)->root;
3860 u64 bs = root->fs_info->sb->s_blocksize;
3864 * TODO:
3865 * - split compressed inline extents. Annoying: we need to
3866 * decompress into destination's address_space (the file offset
3867 * may change, so source mapping won't do), then recompress (or
3868 * otherwise reinsert) a subrange.
3870 * - split destination inode's inline extents. The inline extents can
3871 * be either compressed or non-compressed.
3874 /* the destination must be opened for writing */
3875 if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND))
3878 if (btrfs_root_readonly(root))
3881 ret = mnt_want_write_file(file);
3885 src_file = fdget(srcfd);
3886 if (!src_file.file) {
3888 goto out_drop_write;
3892 if (src_file.file->f_path.mnt != file->f_path.mnt)
3895 src = file_inode(src_file.file);
3901 /* the src must be open for reading */
3902 if (!(src_file.file->f_mode & FMODE_READ))
3905 /* don't make the dst file partly checksummed */
3906 if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
3907 (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
3911 if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
3915 if (src->i_sb != inode->i_sb)
3919 btrfs_double_inode_lock(src, inode);
3921 mutex_lock(&src->i_mutex);
3924 /* determine range to clone */
3926 if (off + len > src->i_size || off + len < off)
3929 olen = len = src->i_size - off;
3930 /* if we extend to eof, continue to block boundary */
3931 if (off + len == src->i_size)
3932 len = ALIGN(src->i_size, bs) - off;
3939 /* verify the end result is block aligned */
3940 if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
3941 !IS_ALIGNED(destoff, bs))
3944 /* reject overlapping ranges within the same file */
3946 if (destoff + len > off && destoff < off + len)
3950 if (destoff > inode->i_size) {
3951 ret = btrfs_cont_expand(inode, inode->i_size, destoff);
3957 * Lock the target range too. Right after we replace the file extent
3958 * items in the fs tree (which now point to the cloned data), we might
3959 * have a worker replace them with extent items relative to a write
3960 * operation that was issued before this clone operation (see
3961 * inode.c:btrfs_finish_ordered_io).
3964 u64 lock_start = min_t(u64, off, destoff);
3965 u64 lock_len = max_t(u64, off, destoff) + len - lock_start;
3967 ret = lock_extent_range(src, lock_start, lock_len, true);
3969 ret = btrfs_double_extent_lock(src, off, inode, destoff, len,
3974 /* ranges in the io trees already unlocked */
3978 ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
3981 u64 lock_start = min_t(u64, off, destoff);
3982 u64 lock_end = max_t(u64, off, destoff) + len - 1;
3984 unlock_extent(&BTRFS_I(src)->io_tree, lock_start, lock_end);
3986 btrfs_double_extent_unlock(src, off, inode, destoff, len);
3989 * Truncate page cache pages so that future reads will see the cloned
3990 * data immediately and not the previous data.
3992 truncate_inode_pages_range(&inode->i_data, destoff,
3993 PAGE_CACHE_ALIGN(destoff + len) - 1);
3996 btrfs_double_inode_unlock(src, inode);
3998 mutex_unlock(&src->i_mutex);
4002 mnt_drop_write_file(file);
4006 static long btrfs_ioctl_clone_range(struct file *file, void __user *argp)
4008 struct btrfs_ioctl_clone_range_args args;
4010 if (copy_from_user(&args, argp, sizeof(args)))
4012 return btrfs_ioctl_clone(file, args.src_fd, args.src_offset,
4013 args.src_length, args.dest_offset);
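/*
 * Illustrative userspace sketch (not part of this file) of filling the args
 * struct unpacked above; a src_length of 0 clones to the end of the source
 * file, and offsets/length must otherwise be block aligned:
 *
 *	struct btrfs_ioctl_clone_range_args args = {
 *		.src_fd	     = src_fd,
 *		.src_offset  = 0,
 *		.src_length  = 0,
 *		.dest_offset = 0,
 *	};
 *
 *	if (ioctl(dst_fd, BTRFS_IOC_CLONE_RANGE, &args) < 0)
 *		perror("BTRFS_IOC_CLONE_RANGE");
 */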
4017 * there are many ways the trans_start and trans_end ioctls can lead
4018 * to deadlocks. They should only be used by applications that
4019 * basically own the machine, and have a very in-depth understanding
4020 * of all the possible deadlocks and enospc problems.
4022 static long btrfs_ioctl_trans_start(struct file *file)
4024 struct inode *inode = file_inode(file);
4025 struct btrfs_root *root = BTRFS_I(inode)->root;
4026 struct btrfs_trans_handle *trans;
4030 if (!capable(CAP_SYS_ADMIN))
4034 if (file->private_data)
4038 if (btrfs_root_readonly(root))
4041 ret = mnt_want_write_file(file);
4045 atomic_inc(&root->fs_info->open_ioctl_trans);
4048 trans = btrfs_start_ioctl_transaction(root);
4052 file->private_data = trans;
4056 atomic_dec(&root->fs_info->open_ioctl_trans);
4057 mnt_drop_write_file(file);
4062 static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
4064 struct inode *inode = file_inode(file);
4065 struct btrfs_root *root = BTRFS_I(inode)->root;
4066 struct btrfs_root *new_root;
4067 struct btrfs_dir_item *di;
4068 struct btrfs_trans_handle *trans;
4069 struct btrfs_path *path;
4070 struct btrfs_key location;
4071 struct btrfs_disk_key disk_key;
4076 if (!capable(CAP_SYS_ADMIN))
4079 ret = mnt_want_write_file(file);
4083 if (copy_from_user(&objectid, argp, sizeof(objectid))) {
4089 objectid = BTRFS_FS_TREE_OBJECTID;
4091 location.objectid = objectid;
4092 location.type = BTRFS_ROOT_ITEM_KEY;
4093 location.offset = (u64)-1;
4095 new_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
4096 if (IS_ERR(new_root)) {
4097 ret = PTR_ERR(new_root);
4101 path = btrfs_alloc_path();
4106 path->leave_spinning = 1;
4108 trans = btrfs_start_transaction(root, 1);
4109 if (IS_ERR(trans)) {
4110 btrfs_free_path(path);
4111 ret = PTR_ERR(trans);
4115 dir_id = btrfs_super_root_dir(root->fs_info->super_copy);
4116 di = btrfs_lookup_dir_item(trans, root->fs_info->tree_root, path,
4117 dir_id, "default", 7, 1);
4118 if (IS_ERR_OR_NULL(di)) {
4119 btrfs_free_path(path);
4120 btrfs_end_transaction(trans, root);
4121 btrfs_err(new_root->fs_info, "Umm, you don't have the default dir "
4122 "item, this isn't going to work");
4127 btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
4128 btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
4129 btrfs_mark_buffer_dirty(path->nodes[0]);
4130 btrfs_free_path(path);
4132 btrfs_set_fs_incompat(root->fs_info, DEFAULT_SUBVOL);
4133 btrfs_end_transaction(trans, root);
4135 mnt_drop_write_file(file);
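/*
 * Illustrative userspace sketch (not part of this file): 'subvol_id' is a
 * hypothetical subvolume objectid; 0 falls back to the top-level tree as
 * handled above:
 *
 *	__u64 objectid = subvol_id;
 *
 *	if (ioctl(fd, BTRFS_IOC_DEFAULT_SUBVOL, &objectid) < 0)
 *		perror("BTRFS_IOC_DEFAULT_SUBVOL");
 */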
4139 void btrfs_get_block_group_info(struct list_head *groups_list,
4140 struct btrfs_ioctl_space_info *space)
4142 struct btrfs_block_group_cache *block_group;
4144 space->total_bytes = 0;
4145 space->used_bytes = 0;
4147 list_for_each_entry(block_group, groups_list, list) {
4148 space->flags = block_group->flags;
4149 space->total_bytes += block_group->key.offset;
4150 space->used_bytes +=
4151 btrfs_block_group_used(&block_group->item);
4155 static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
4157 struct btrfs_ioctl_space_args space_args;
4158 struct btrfs_ioctl_space_info space;
4159 struct btrfs_ioctl_space_info *dest;
4160 struct btrfs_ioctl_space_info *dest_orig;
4161 struct btrfs_ioctl_space_info __user *user_dest;
4162 struct btrfs_space_info *info;
4163 u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
4164 BTRFS_BLOCK_GROUP_SYSTEM,
4165 BTRFS_BLOCK_GROUP_METADATA,
4166 BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
4173 if (copy_from_user(&space_args,
4174 (struct btrfs_ioctl_space_args __user *)arg,
4175 sizeof(space_args)))
4178 for (i = 0; i < num_types; i++) {
4179 struct btrfs_space_info *tmp;
4183 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
4185 if (tmp->flags == types[i]) {
4195 down_read(&info->groups_sem);
4196 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
4197 if (!list_empty(&info->block_groups[c]))
4200 up_read(&info->groups_sem);
4204 * Global block reserve, exported as a space_info
4208 /* space_slots == 0 means they are asking for a count */
4209 if (space_args.space_slots == 0) {
4210 space_args.total_spaces = slot_count;
4214 slot_count = min_t(u64, space_args.space_slots, slot_count);
4216 alloc_size = sizeof(*dest) * slot_count;
4218 /* we generally have at most 6 or so space infos, one for each raid
4219 * level. So, a whole page should be more than enough for everyone
4221 if (alloc_size > PAGE_CACHE_SIZE)
4224 space_args.total_spaces = 0;
4225 dest = kmalloc(alloc_size, GFP_NOFS);
4230 /* now we have a buffer to copy into */
4231 for (i = 0; i < num_types; i++) {
4232 struct btrfs_space_info *tmp;
4239 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
4241 if (tmp->flags == types[i]) {
4250 down_read(&info->groups_sem);
4251 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
4252 if (!list_empty(&info->block_groups[c])) {
4253 btrfs_get_block_group_info(
4254 &info->block_groups[c], &space);
4255 memcpy(dest, &space, sizeof(space));
4257 space_args.total_spaces++;
4263 up_read(&info->groups_sem);
4267 * Add global block reserve
4270 struct btrfs_block_rsv *block_rsv = &root->fs_info->global_block_rsv;
4272 spin_lock(&block_rsv->lock);
4273 space.total_bytes = block_rsv->size;
4274 space.used_bytes = block_rsv->size - block_rsv->reserved;
4275 spin_unlock(&block_rsv->lock);
4276 space.flags = BTRFS_SPACE_INFO_GLOBAL_RSV;
4277 memcpy(dest, &space, sizeof(space));
4278 space_args.total_spaces++;
4281 user_dest = (struct btrfs_ioctl_space_info __user *)
4282 (arg + sizeof(struct btrfs_ioctl_space_args));
4284 if (copy_to_user(user_dest, dest_orig, alloc_size))
4289 if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args)))
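/*
 * Illustrative userspace sketch (not part of this file) of the two-call
 * protocol above: probe with space_slots == 0 to learn the slot count,
 * then allocate and fetch the slots:
 *
 *	struct btrfs_ioctl_space_args probe = { .space_slots = 0 };
 *	struct btrfs_ioctl_space_args *sargs;
 *
 *	if (ioctl(fd, BTRFS_IOC_SPACE_INFO, &probe) < 0)
 *		return;
 *	sargs = calloc(1, sizeof(*sargs) + probe.total_spaces *
 *		       sizeof(struct btrfs_ioctl_space_info));
 *	sargs->space_slots = probe.total_spaces;
 *	if (ioctl(fd, BTRFS_IOC_SPACE_INFO, sargs) == 0)
 *		printf("%llu space infos returned\n", sargs->total_spaces);
 *	free(sargs);
 */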
4296 * there are many ways the trans_start and trans_end ioctls can lead
4297 * to deadlocks. They should only be used by applications that
4298 * basically own the machine, and have a very in-depth understanding
4299 * of all the possible deadlocks and enospc problems.
4301 long btrfs_ioctl_trans_end(struct file *file)
4303 struct inode *inode = file_inode(file);
4304 struct btrfs_root *root = BTRFS_I(inode)->root;
4305 struct btrfs_trans_handle *trans;
4307 trans = file->private_data;
4310 file->private_data = NULL;
4312 btrfs_end_transaction(trans, root);
4314 atomic_dec(&root->fs_info->open_ioctl_trans);
4316 mnt_drop_write_file(file);
4320 static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
4323 struct btrfs_trans_handle *trans;
4327 trans = btrfs_attach_transaction_barrier(root);
4328 if (IS_ERR(trans)) {
4329 if (PTR_ERR(trans) != -ENOENT)
4330 return PTR_ERR(trans);
4332 /* No running transaction, don't bother */
4333 transid = root->fs_info->last_trans_committed;
4336 transid = trans->transid;
4337 ret = btrfs_commit_transaction_async(trans, root, 0);
4339 btrfs_end_transaction(trans, root);
4344 if (copy_to_user(argp, &transid, sizeof(transid)))
4349 static noinline long btrfs_ioctl_wait_sync(struct btrfs_root *root,
4355 if (copy_from_user(&transid, argp, sizeof(transid)))
4358 transid = 0; /* current trans */
4360 return btrfs_wait_for_commit(root, transid);
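/*
 * Illustrative userspace sketch (not part of this file): kick off an async
 * commit and then wait for that specific transaction; passing NULL to
 * BTRFS_IOC_WAIT_SYNC waits on the current transaction instead (see the
 * "current trans" fallback above):
 *
 *	__u64 transid;
 *
 *	if (ioctl(fd, BTRFS_IOC_START_SYNC, &transid) == 0)
 *		ioctl(fd, BTRFS_IOC_WAIT_SYNC, &transid);
 */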
4363 static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
4365 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
4366 struct btrfs_ioctl_scrub_args *sa;
4369 if (!capable(CAP_SYS_ADMIN))
4372 sa = memdup_user(arg, sizeof(*sa));
4376 if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
4377 ret = mnt_want_write_file(file);
4382 ret = btrfs_scrub_dev(root->fs_info, sa->devid, sa->start, sa->end,
4383 &sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
4386 if (copy_to_user(arg, sa, sizeof(*sa)))
4389 if (!(sa->flags & BTRFS_SCRUB_READONLY))
4390 mnt_drop_write_file(file);
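/*
 * Illustrative userspace sketch (not part of this file): a read-only scrub
 * of devid 1 over the whole device, then reading one of the progress
 * counters returned through the same struct:
 *
 *	struct btrfs_ioctl_scrub_args sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.devid = 1;
 *	sa.start = 0;
 *	sa.end = (__u64)-1;
 *	sa.flags = BTRFS_SCRUB_READONLY;
 *	if (ioctl(fd, BTRFS_IOC_SCRUB, &sa) == 0)
 *		printf("%llu data bytes scrubbed\n",
 *		       sa.progress.data_bytes_scrubbed);
 */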
4396 static long btrfs_ioctl_scrub_cancel(struct btrfs_root *root, void __user *arg)
4398 if (!capable(CAP_SYS_ADMIN))
4401 return btrfs_scrub_cancel(root->fs_info);
4404 static long btrfs_ioctl_scrub_progress(struct btrfs_root *root,
4407 struct btrfs_ioctl_scrub_args *sa;
4410 if (!capable(CAP_SYS_ADMIN))
4413 sa = memdup_user(arg, sizeof(*sa));
4417 ret = btrfs_scrub_progress(root, sa->devid, &sa->progress);
4419 if (copy_to_user(arg, sa, sizeof(*sa)))
4426 static long btrfs_ioctl_get_dev_stats(struct btrfs_root *root,
4429 struct btrfs_ioctl_get_dev_stats *sa;
4432 sa = memdup_user(arg, sizeof(*sa));
4436 if ((sa->flags & BTRFS_DEV_STATS_RESET) && !capable(CAP_SYS_ADMIN)) {
4441 ret = btrfs_get_dev_stats(root, sa);
4443 if (copy_to_user(arg, sa, sizeof(*sa)))
4450 static long btrfs_ioctl_dev_replace(struct btrfs_root *root, void __user *arg)
4452 struct btrfs_ioctl_dev_replace_args *p;
4455 if (!capable(CAP_SYS_ADMIN))
4458 p = memdup_user(arg, sizeof(*p));
4463 case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
4464 if (root->fs_info->sb->s_flags & MS_RDONLY) {
4469 &root->fs_info->mutually_exclusive_operation_running,
4471 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
4473 ret = btrfs_dev_replace_start(root, p);
4475 &root->fs_info->mutually_exclusive_operation_running,
4479 case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS:
4480 btrfs_dev_replace_status(root->fs_info, p);
4483 case BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL:
4484 ret = btrfs_dev_replace_cancel(root->fs_info, p);
4491 if (copy_to_user(arg, p, sizeof(*p)))
4498 static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
4504 struct btrfs_ioctl_ino_path_args *ipa = NULL;
4505 struct inode_fs_paths *ipath = NULL;
4506 struct btrfs_path *path;
4508 if (!capable(CAP_DAC_READ_SEARCH))
4511 path = btrfs_alloc_path();
4517 ipa = memdup_user(arg, sizeof(*ipa));
4524 size = min_t(u32, ipa->size, 4096);
4525 ipath = init_ipath(size, root, path);
4526 if (IS_ERR(ipath)) {
4527 ret = PTR_ERR(ipath);
4532 ret = paths_from_inode(ipa->inum, ipath);
4536 for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
4537 rel_ptr = ipath->fspath->val[i] -
4538 (u64)(unsigned long)ipath->fspath->val;
4539 ipath->fspath->val[i] = rel_ptr;
4542 ret = copy_to_user((void *)(unsigned long)ipa->fspath,
4543 (void *)(unsigned long)ipath->fspath, size);
4550 btrfs_free_path(path);
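/*
 * Illustrative userspace sketch (not part of this file): after the loop
 * above rewrites fspath->val[] into byte offsets, each path string is read
 * relative to the start of the val array; 'ino' is a hypothetical inode
 * number:
 *
 *	struct btrfs_data_container *fspath = calloc(1, 4096);
 *	struct btrfs_ioctl_ino_path_args ipa = {
 *		.inum	= ino,
 *		.size	= 4096,
 *		.fspath	= (uintptr_t)fspath,
 *	};
 *	int i;
 *
 *	if (ioctl(fd, BTRFS_IOC_INO_PATHS, &ipa) == 0)
 *		for (i = 0; i < fspath->elem_cnt; i++)
 *			puts((char *)fspath->val + fspath->val[i]);
 *	free(fspath);
 */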
4557 static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
4559 struct btrfs_data_container *inodes = ctx;
4560 const size_t c = 3 * sizeof(u64);
4562 if (inodes->bytes_left >= c) {
4563 inodes->bytes_left -= c;
4564 inodes->val[inodes->elem_cnt] = inum;
4565 inodes->val[inodes->elem_cnt + 1] = offset;
4566 inodes->val[inodes->elem_cnt + 2] = root;
4567 inodes->elem_cnt += 3;
4569 inodes->bytes_missing += c - inodes->bytes_left;
4570 inodes->bytes_left = 0;
4571 inodes->elem_missed += 3;
4577 static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
4582 struct btrfs_ioctl_logical_ino_args *loi;
4583 struct btrfs_data_container *inodes = NULL;
4584 struct btrfs_path *path = NULL;
4586 if (!capable(CAP_SYS_ADMIN))
4589 loi = memdup_user(arg, sizeof(*loi));
4596 path = btrfs_alloc_path();
4602 size = min_t(u32, loi->size, 64 * 1024);
4603 inodes = init_data_container(size);
4604 if (IS_ERR(inodes)) {
4605 ret = PTR_ERR(inodes);
4610 ret = iterate_inodes_from_logical(loi->logical, root->fs_info, path,
4611 build_ino_list, inodes);
4617 ret = copy_to_user((void *)(unsigned long)loi->inodes,
4618 (void *)(unsigned long)inodes, size);
4623 btrfs_free_path(path);
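/*
 * Illustrative userspace sketch (not part of this file): the result buffer
 * holds the (inum, offset, root) triplets packed by build_ino_list() above;
 * 'logical' is a hypothetical logical byte address on the filesystem:
 *
 *	struct btrfs_data_container *inodes = calloc(1, 4096);
 *	struct btrfs_ioctl_logical_ino_args loi = {
 *		.logical = logical,
 *		.size	 = 4096,
 *		.inodes	 = (uintptr_t)inodes,
 *	};
 *	int i;
 *
 *	if (ioctl(fd, BTRFS_IOC_LOGICAL_INO, &loi) == 0)
 *		for (i = 0; i < inodes->elem_cnt; i += 3)
 *			printf("ino %llu offset %llu root %llu\n",
 *			       inodes->val[i], inodes->val[i + 1],
 *			       inodes->val[i + 2]);
 *	free(inodes);
 */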
4630 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
4631 struct btrfs_ioctl_balance_args *bargs)
4633 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4635 bargs->flags = bctl->flags;
4637 if (atomic_read(&fs_info->balance_running))
4638 bargs->state |= BTRFS_BALANCE_STATE_RUNNING;
4639 if (atomic_read(&fs_info->balance_pause_req))
4640 bargs->state |= BTRFS_BALANCE_STATE_PAUSE_REQ;
4641 if (atomic_read(&fs_info->balance_cancel_req))
4642 bargs->state |= BTRFS_BALANCE_STATE_CANCEL_REQ;
4644 memcpy(&bargs->data, &bctl->data, sizeof(bargs->data));
4645 memcpy(&bargs->meta, &bctl->meta, sizeof(bargs->meta));
4646 memcpy(&bargs->sys, &bctl->sys, sizeof(bargs->sys));
4649 spin_lock(&fs_info->balance_lock);
4650 memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
4651 spin_unlock(&fs_info->balance_lock);
4653 memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
4657 static long btrfs_ioctl_balance(struct file *file, void __user *arg)
4659 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
4660 struct btrfs_fs_info *fs_info = root->fs_info;
4661 struct btrfs_ioctl_balance_args *bargs;
4662 struct btrfs_balance_control *bctl;
4663 bool need_unlock; /* for mut. excl. ops lock */
4666 if (!capable(CAP_SYS_ADMIN))
4669 ret = mnt_want_write_file(file);
4674 if (!atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
4675 mutex_lock(&fs_info->volume_mutex);
4676 mutex_lock(&fs_info->balance_mutex);
4682 * mut. excl. ops lock is locked. Three possibilities:
4683 * (1) some other op is running
4684 * (2) balance is running
4685 * (3) balance is paused -- special case (think resume)
4687 mutex_lock(&fs_info->balance_mutex);
4688 if (fs_info->balance_ctl) {
4689 /* this is either (2) or (3) */
4690 if (!atomic_read(&fs_info->balance_running)) {
4691 mutex_unlock(&fs_info->balance_mutex);
4692 if (!mutex_trylock(&fs_info->volume_mutex))
4694 mutex_lock(&fs_info->balance_mutex);
4696 if (fs_info->balance_ctl &&
4697 !atomic_read(&fs_info->balance_running)) {
4699 need_unlock = false;
4703 mutex_unlock(&fs_info->balance_mutex);
4704 mutex_unlock(&fs_info->volume_mutex);
4708 mutex_unlock(&fs_info->balance_mutex);
4714 mutex_unlock(&fs_info->balance_mutex);
4715 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
4720 BUG_ON(!atomic_read(&fs_info->mutually_exclusive_operation_running));
4723 bargs = memdup_user(arg, sizeof(*bargs));
4724 if (IS_ERR(bargs)) {
4725 ret = PTR_ERR(bargs);
4729 if (bargs->flags & BTRFS_BALANCE_RESUME) {
4730 if (!fs_info->balance_ctl) {
4735 bctl = fs_info->balance_ctl;
4736 spin_lock(&fs_info->balance_lock);
4737 bctl->flags |= BTRFS_BALANCE_RESUME;
4738 spin_unlock(&fs_info->balance_lock);
4746 if (fs_info->balance_ctl) {
4751 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4757 bctl->fs_info = fs_info;
4759 memcpy(&bctl->data, &bargs->data, sizeof(bctl->data));
4760 memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta));
4761 memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys));
4763 bctl->flags = bargs->flags;
4765 /* balance everything - no filters */
4766 bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
4769 if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
4776 * Ownership of bctl and mutually_exclusive_operation_running
4777 * goes to btrfs_balance. bctl is freed in __cancel_balance,
4778 * or, if restriper was paused all the way until unmount, in
4779 * free_fs_info. mutually_exclusive_operation_running is
4780 * cleared in __cancel_balance.
4782 need_unlock = false;
4784 ret = btrfs_balance(bctl, bargs);
4788 if (copy_to_user(arg, bargs, sizeof(*bargs)))
4797 mutex_unlock(&fs_info->balance_mutex);
4798 mutex_unlock(&fs_info->volume_mutex);
4800 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
4802 mnt_drop_write_file(file);
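/*
 * Illustrative userspace sketch (not part of this file): a plain balance of
 * all chunk types through the v2 interface handled above; setting
 * BTRFS_BALANCE_RESUME instead takes the resume branch:
 *
 *	struct btrfs_ioctl_balance_args bargs;
 *
 *	memset(&bargs, 0, sizeof(bargs));
 *	bargs.flags = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA |
 *		      BTRFS_BALANCE_SYSTEM;
 *	if (ioctl(fd, BTRFS_IOC_BALANCE_V2, &bargs) < 0)
 *		perror("BTRFS_IOC_BALANCE_V2");
 */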
4806 static long btrfs_ioctl_balance_ctl(struct btrfs_root *root, int cmd)
4808 if (!capable(CAP_SYS_ADMIN))
4812 case BTRFS_BALANCE_CTL_PAUSE:
4813 return btrfs_pause_balance(root->fs_info);
4814 case BTRFS_BALANCE_CTL_CANCEL:
4815 return btrfs_cancel_balance(root->fs_info);
4821 static long btrfs_ioctl_balance_progress(struct btrfs_root *root,
4824 struct btrfs_fs_info *fs_info = root->fs_info;
4825 struct btrfs_ioctl_balance_args *bargs;
4828 if (!capable(CAP_SYS_ADMIN))
4831 mutex_lock(&fs_info->balance_mutex);
4832 if (!fs_info->balance_ctl) {
4837 bargs = kzalloc(sizeof(*bargs), GFP_NOFS);
4843 update_ioctl_balance_args(fs_info, 1, bargs);
4845 if (copy_to_user(arg, bargs, sizeof(*bargs)))
4850 mutex_unlock(&fs_info->balance_mutex);
4854 static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
4856 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
4857 struct btrfs_ioctl_quota_ctl_args *sa;
4858 struct btrfs_trans_handle *trans = NULL;
4862 if (!capable(CAP_SYS_ADMIN))
4865 ret = mnt_want_write_file(file);
4869 sa = memdup_user(arg, sizeof(*sa));
4875 down_write(&root->fs_info->subvol_sem);
4876 trans = btrfs_start_transaction(root->fs_info->tree_root, 2);
4877 if (IS_ERR(trans)) {
4878 ret = PTR_ERR(trans);
4883 case BTRFS_QUOTA_CTL_ENABLE:
4884 ret = btrfs_quota_enable(trans, root->fs_info);
4886 case BTRFS_QUOTA_CTL_DISABLE:
4887 ret = btrfs_quota_disable(trans, root->fs_info);
4894 err = btrfs_commit_transaction(trans, root->fs_info->tree_root);
4899 up_write(&root->fs_info->subvol_sem);
4901 mnt_drop_write_file(file);
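/*
 * Illustrative userspace sketch (not part of this file): enabling quotas via
 * the ctl ioctl above (BTRFS_QUOTA_CTL_DISABLE works the same way):
 *
 *	struct btrfs_ioctl_quota_ctl_args qca;
 *
 *	memset(&qca, 0, sizeof(qca));
 *	qca.cmd = BTRFS_QUOTA_CTL_ENABLE;
 *	if (ioctl(fd, BTRFS_IOC_QUOTA_CTL, &qca) < 0)
 *		perror("BTRFS_IOC_QUOTA_CTL");
 */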
4905 static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
4907 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
4908 struct btrfs_ioctl_qgroup_assign_args *sa;
4909 struct btrfs_trans_handle *trans;
4913 if (!capable(CAP_SYS_ADMIN))
4916 ret = mnt_want_write_file(file);
4920 sa = memdup_user(arg, sizeof(*sa));
4926 trans = btrfs_join_transaction(root);
4927 if (IS_ERR(trans)) {
4928 ret = PTR_ERR(trans);
4932 /* FIXME: check if the IDs really exist */
4934 ret = btrfs_add_qgroup_relation(trans, root->fs_info,
4937 ret = btrfs_del_qgroup_relation(trans, root->fs_info,
4941 /* update qgroup status and info */
4942 err = btrfs_run_qgroups(trans, root->fs_info);
4944 btrfs_std_error(root->fs_info, ret,
4945 "failed to update qgroup status and info\n");
4946 err = btrfs_end_transaction(trans, root);
4953 mnt_drop_write_file(file);
4957 static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
4959 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
4960 struct btrfs_ioctl_qgroup_create_args *sa;
4961 struct btrfs_trans_handle *trans;
4965 if (!capable(CAP_SYS_ADMIN))
4968 ret = mnt_want_write_file(file);
4972 sa = memdup_user(arg, sizeof(*sa));
4978 if (!sa->qgroupid) {
4983 trans = btrfs_join_transaction(root);
4984 if (IS_ERR(trans)) {
4985 ret = PTR_ERR(trans);
4989 /* FIXME: check if the IDs really exist */
4991 ret = btrfs_create_qgroup(trans, root->fs_info, sa->qgroupid);
4993 ret = btrfs_remove_qgroup(trans, root->fs_info, sa->qgroupid);
4996 err = btrfs_end_transaction(trans, root);
5003 mnt_drop_write_file(file);
5007 static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
5009 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
5010 struct btrfs_ioctl_qgroup_limit_args *sa;
5011 struct btrfs_trans_handle *trans;
5016 if (!capable(CAP_SYS_ADMIN))
5019 ret = mnt_want_write_file(file);
5023 sa = memdup_user(arg, sizeof(*sa));
5029 trans = btrfs_join_transaction(root);
5030 if (IS_ERR(trans)) {
5031 ret = PTR_ERR(trans);
5035 qgroupid = sa->qgroupid;
5037 /* take the current subvol as qgroup */
5038 qgroupid = root->root_key.objectid;
5041 /* FIXME: check if the IDs really exist */
5042 ret = btrfs_limit_qgroup(trans, root->fs_info, qgroupid, &sa->lim);
5044 err = btrfs_end_transaction(trans, root);
5051 mnt_drop_write_file(file);
5055 static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
5057 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
5058 struct btrfs_ioctl_quota_rescan_args *qsa;
5061 if (!capable(CAP_SYS_ADMIN))
5064 ret = mnt_want_write_file(file);
5068 qsa = memdup_user(arg, sizeof(*qsa));
5079 ret = btrfs_qgroup_rescan(root->fs_info);
5084 mnt_drop_write_file(file);
5088 static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
5090 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
5091 struct btrfs_ioctl_quota_rescan_args *qsa;
5094 if (!capable(CAP_SYS_ADMIN))
5097 qsa = kzalloc(sizeof(*qsa), GFP_NOFS);
5101 if (root->fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
5103 qsa->progress = root->fs_info->qgroup_rescan_progress.objectid;
5106 if (copy_to_user(arg, qsa, sizeof(*qsa)))
5113 static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
5115 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
5117 if (!capable(CAP_SYS_ADMIN))
5120 return btrfs_qgroup_wait_for_completion(root->fs_info);
5123 static long _btrfs_ioctl_set_received_subvol(struct file *file,
5124 struct btrfs_ioctl_received_subvol_args *sa)
5126 struct inode *inode = file_inode(file);
5127 struct btrfs_root *root = BTRFS_I(inode)->root;
5128 struct btrfs_root_item *root_item = &root->root_item;
5129 struct btrfs_trans_handle *trans;
5130 struct timespec ct = CURRENT_TIME;
5132 int received_uuid_changed;
5134 if (!inode_owner_or_capable(inode))
5137 ret = mnt_want_write_file(file);
5141 down_write(&root->fs_info->subvol_sem);
5143 if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
5148 if (btrfs_root_readonly(root)) {
5154 * 1 - root item
5155 * 2 - uuid items (received uuid + subvol uuid)
5157 trans = btrfs_start_transaction(root, 3);
5158 if (IS_ERR(trans)) {
5159 ret = PTR_ERR(trans);
5164 sa->rtransid = trans->transid;
5165 sa->rtime.sec = ct.tv_sec;
5166 sa->rtime.nsec = ct.tv_nsec;
5168 received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid,
5170 if (received_uuid_changed &&
5171 !btrfs_is_empty_uuid(root_item->received_uuid))
5172 btrfs_uuid_tree_rem(trans, root->fs_info->uuid_root,
5173 root_item->received_uuid,
5174 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
5175 root->root_key.objectid);
5176 memcpy(root_item->received_uuid, sa->uuid, BTRFS_UUID_SIZE);
5177 btrfs_set_root_stransid(root_item, sa->stransid);
5178 btrfs_set_root_rtransid(root_item, sa->rtransid);
5179 btrfs_set_stack_timespec_sec(&root_item->stime, sa->stime.sec);
5180 btrfs_set_stack_timespec_nsec(&root_item->stime, sa->stime.nsec);
5181 btrfs_set_stack_timespec_sec(&root_item->rtime, sa->rtime.sec);
5182 btrfs_set_stack_timespec_nsec(&root_item->rtime, sa->rtime.nsec);
5184 ret = btrfs_update_root(trans, root->fs_info->tree_root,
5185 &root->root_key, &root->root_item);
5187 btrfs_end_transaction(trans, root);
5190 if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) {
5191 ret = btrfs_uuid_tree_add(trans, root->fs_info->uuid_root,
5193 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
5194 root->root_key.objectid);
5195 if (ret < 0 && ret != -EEXIST) {
5196 btrfs_abort_transaction(trans, root, ret);
5200 ret = btrfs_commit_transaction(trans, root);
5202 btrfs_abort_transaction(trans, root, ret);
5207 up_write(&root->fs_info->subvol_sem);
5208 mnt_drop_write_file(file);
5213 static long btrfs_ioctl_set_received_subvol_32(struct file *file,
5216 struct btrfs_ioctl_received_subvol_args_32 *args32 = NULL;
5217 struct btrfs_ioctl_received_subvol_args *args64 = NULL;
5220 args32 = memdup_user(arg, sizeof(*args32));
5221 if (IS_ERR(args32)) {
5222 ret = PTR_ERR(args32);
5227 args64 = kmalloc(sizeof(*args64), GFP_NOFS);
5233 memcpy(args64->uuid, args32->uuid, BTRFS_UUID_SIZE);
5234 args64->stransid = args32->stransid;
5235 args64->rtransid = args32->rtransid;
5236 args64->stime.sec = args32->stime.sec;
5237 args64->stime.nsec = args32->stime.nsec;
5238 args64->rtime.sec = args32->rtime.sec;
5239 args64->rtime.nsec = args32->rtime.nsec;
5240 args64->flags = args32->flags;
5242 ret = _btrfs_ioctl_set_received_subvol(file, args64);
5246 memcpy(args32->uuid, args64->uuid, BTRFS_UUID_SIZE);
5247 args32->stransid = args64->stransid;
5248 args32->rtransid = args64->rtransid;
5249 args32->stime.sec = args64->stime.sec;
5250 args32->stime.nsec = args64->stime.nsec;
5251 args32->rtime.sec = args64->rtime.sec;
5252 args32->rtime.nsec = args64->rtime.nsec;
5253 args32->flags = args64->flags;
5255 ret = copy_to_user(arg, args32, sizeof(*args32));
5266 static long btrfs_ioctl_set_received_subvol(struct file *file,
5269 struct btrfs_ioctl_received_subvol_args *sa = NULL;
5272 sa = memdup_user(arg, sizeof(*sa));
5279 ret = _btrfs_ioctl_set_received_subvol(file, sa);
5284 ret = copy_to_user(arg, sa, sizeof(*sa));
5293 static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg)
5295 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
5298 char label[BTRFS_LABEL_SIZE];
5300 spin_lock(&root->fs_info->super_lock);
5301 memcpy(label, root->fs_info->super_copy->label, BTRFS_LABEL_SIZE);
5302 spin_unlock(&root->fs_info->super_lock);
5304 len = strnlen(label, BTRFS_LABEL_SIZE);
5306 if (len == BTRFS_LABEL_SIZE) {
5307 btrfs_warn(root->fs_info,
5308 "label is too long, return the first %zu bytes", --len);
5311 ret = copy_to_user(arg, label, len);
5313 return ret ? -EFAULT : 0;
5316 static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
5318 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
5319 struct btrfs_super_block *super_block = root->fs_info->super_copy;
5320 struct btrfs_trans_handle *trans;
5321 char label[BTRFS_LABEL_SIZE];
5324 if (!capable(CAP_SYS_ADMIN))
5327 if (copy_from_user(label, arg, sizeof(label)))
5330 if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
5331 btrfs_err(root->fs_info, "unable to set label with more than %d bytes",
5332 BTRFS_LABEL_SIZE - 1);
5336 ret = mnt_want_write_file(file);
5340 trans = btrfs_start_transaction(root, 0);
5341 if (IS_ERR(trans)) {
5342 ret = PTR_ERR(trans);
5346 spin_lock(&root->fs_info->super_lock);
5347 strcpy(super_block->label, label);
5348 spin_unlock(&root->fs_info->super_lock);
5349 ret = btrfs_commit_transaction(trans, root);
5352 mnt_drop_write_file(file);
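/*
 * Illustrative userspace sketch (not part of this file) of the get/set
 * label pair above; "backup-pool" is just an example label:
 *
 *	char label[BTRFS_LABEL_SIZE];
 *
 *	if (ioctl(fd, BTRFS_IOC_GET_FSLABEL, label) == 0)
 *		printf("old label: %s\n", label);
 *	memset(label, 0, sizeof(label));
 *	strncpy(label, "backup-pool", BTRFS_LABEL_SIZE - 1);
 *	if (ioctl(fd, BTRFS_IOC_SET_FSLABEL, label) < 0)
 *		perror("BTRFS_IOC_SET_FSLABEL");
 */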
5356 #define INIT_FEATURE_FLAGS(suffix) \
5357 { .compat_flags = BTRFS_FEATURE_COMPAT_##suffix, \
5358 .compat_ro_flags = BTRFS_FEATURE_COMPAT_RO_##suffix, \
5359 .incompat_flags = BTRFS_FEATURE_INCOMPAT_##suffix }
5361 static int btrfs_ioctl_get_supported_features(struct file *file,
5364 static struct btrfs_ioctl_feature_flags features[3] = {
5365 INIT_FEATURE_FLAGS(SUPP),
5366 INIT_FEATURE_FLAGS(SAFE_SET),
5367 INIT_FEATURE_FLAGS(SAFE_CLEAR)
5370 if (copy_to_user(arg, &features, sizeof(features)))
5376 static int btrfs_ioctl_get_features(struct file *file, void __user *arg)
5378 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
5379 struct btrfs_super_block *super_block = root->fs_info->super_copy;
5380 struct btrfs_ioctl_feature_flags features;
5382 features.compat_flags = btrfs_super_compat_flags(super_block);
5383 features.compat_ro_flags = btrfs_super_compat_ro_flags(super_block);
5384 features.incompat_flags = btrfs_super_incompat_flags(super_block);
5386 if (copy_to_user(arg, &features, sizeof(features)))
5392 static int check_feature_bits(struct btrfs_root *root,
5393 enum btrfs_feature_set set,
5394 u64 change_mask, u64 flags, u64 supported_flags,
5395 u64 safe_set, u64 safe_clear)
5397 const char *type = btrfs_feature_set_names[set];
5399 u64 disallowed, unsupported;
5400 u64 set_mask = flags & change_mask;
5401 u64 clear_mask = ~flags & change_mask;
5403 unsupported = set_mask & ~supported_flags;
5405 names = btrfs_printable_features(set, unsupported);
5407 btrfs_warn(root->fs_info,
5408 "this kernel does not support the %s feature bit%s",
5409 names, strchr(names, ',') ? "s" : "");
5412 btrfs_warn(root->fs_info,
5413 "this kernel does not support %s bits 0x%llx",
5418 disallowed = set_mask & ~safe_set;
5420 names = btrfs_printable_features(set, disallowed);
5422 btrfs_warn(root->fs_info,
5423 "can't set the %s feature bit%s while mounted",
5424 names, strchr(names, ',') ? "s" : "");
5427 btrfs_warn(root->fs_info,
5428 "can't set %s bits 0x%llx while mounted",
5433 disallowed = clear_mask & ~safe_clear;
5435 names = btrfs_printable_features(set, disallowed);
5437 btrfs_warn(root->fs_info,
5438 "can't clear the %s feature bit%s while mounted",
5439 names, strchr(names, ',') ? "s" : "");
5442 btrfs_warn(root->fs_info,
5443 "can't clear %s bits 0x%llx while mounted",
5451 #define check_feature(root, change_mask, flags, mask_base) \
5452 check_feature_bits(root, FEAT_##mask_base, change_mask, flags, \
5453 BTRFS_FEATURE_ ## mask_base ## _SUPP, \
5454 BTRFS_FEATURE_ ## mask_base ## _SAFE_SET, \
5455 BTRFS_FEATURE_ ## mask_base ## _SAFE_CLEAR)
static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
{
	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
	struct btrfs_super_block *super_block = root->fs_info->super_copy;
	struct btrfs_ioctl_feature_flags flags[2];
	struct btrfs_trans_handle *trans;
	u64 newflags;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(flags, arg, sizeof(flags)))
		return -EFAULT;

	/* Nothing to do */
	if (!flags[0].compat_flags && !flags[0].compat_ro_flags &&
	    !flags[0].incompat_flags)
		return 0;
	ret = check_feature(root, flags[0].compat_flags,
			    flags[1].compat_flags, COMPAT);
	if (ret)
		return ret;
	ret = check_feature(root, flags[0].compat_ro_flags,
			    flags[1].compat_ro_flags, COMPAT_RO);
	if (ret)
		return ret;
	ret = check_feature(root, flags[0].incompat_flags,
			    flags[1].incompat_flags, INCOMPAT);
	if (ret)
		return ret;
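
	/*
	 * All requested changes passed validation; apply them under a
	 * transaction.  For each feature set: OR in the bits being set
	 * (mask & value) and clear the bits being cleared (mask & ~value).
	 */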
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	spin_lock(&root->fs_info->super_lock);
	newflags = btrfs_super_compat_flags(super_block);
	newflags |= flags[0].compat_flags & flags[1].compat_flags;
	newflags &= ~(flags[0].compat_flags & ~flags[1].compat_flags);
	btrfs_set_super_compat_flags(super_block, newflags);

	newflags = btrfs_super_compat_ro_flags(super_block);
	newflags |= flags[0].compat_ro_flags & flags[1].compat_ro_flags;
	newflags &= ~(flags[0].compat_ro_flags & ~flags[1].compat_ro_flags);
	btrfs_set_super_compat_ro_flags(super_block, newflags);

	newflags = btrfs_super_incompat_flags(super_block);
	newflags |= flags[0].incompat_flags & flags[1].incompat_flags;
	newflags &= ~(flags[0].incompat_flags & ~flags[1].incompat_flags);
	btrfs_set_super_incompat_flags(super_block, newflags);
	spin_unlock(&root->fs_info->super_lock);

	return btrfs_commit_transaction(trans, root);
}
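
/*
 * Top-level ioctl dispatcher: decode cmd and hand the userspace argument
 * pointer to the matching helper.
 */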
long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return btrfs_ioctl_getflags(file, argp);
	case FS_IOC_SETFLAGS:
		return btrfs_ioctl_setflags(file, argp);
	case FS_IOC_GETVERSION:
		return btrfs_ioctl_getversion(file, argp);
	case FITRIM:
		return btrfs_ioctl_fitrim(file, argp);
	case BTRFS_IOC_SNAP_CREATE:
		return btrfs_ioctl_snap_create(file, argp, 0);
	case BTRFS_IOC_SNAP_CREATE_V2:
		return btrfs_ioctl_snap_create_v2(file, argp, 0);
	case BTRFS_IOC_SUBVOL_CREATE:
		return btrfs_ioctl_snap_create(file, argp, 1);
	case BTRFS_IOC_SUBVOL_CREATE_V2:
		return btrfs_ioctl_snap_create_v2(file, argp, 1);
	case BTRFS_IOC_SNAP_DESTROY:
		return btrfs_ioctl_snap_destroy(file, argp);
	case BTRFS_IOC_SUBVOL_GETFLAGS:
		return btrfs_ioctl_subvol_getflags(file, argp);
	case BTRFS_IOC_SUBVOL_SETFLAGS:
		return btrfs_ioctl_subvol_setflags(file, argp);
	case BTRFS_IOC_DEFAULT_SUBVOL:
		return btrfs_ioctl_default_subvol(file, argp);
	case BTRFS_IOC_DEFRAG:
		return btrfs_ioctl_defrag(file, NULL);
	case BTRFS_IOC_DEFRAG_RANGE:
		return btrfs_ioctl_defrag(file, argp);
	case BTRFS_IOC_RESIZE:
		return btrfs_ioctl_resize(file, argp);
	case BTRFS_IOC_ADD_DEV:
		return btrfs_ioctl_add_dev(root, argp);
	case BTRFS_IOC_RM_DEV:
		return btrfs_ioctl_rm_dev(file, argp);
	case BTRFS_IOC_FS_INFO:
		return btrfs_ioctl_fs_info(root, argp);
	case BTRFS_IOC_DEV_INFO:
		return btrfs_ioctl_dev_info(root, argp);
	case BTRFS_IOC_BALANCE:
		return btrfs_ioctl_balance(file, NULL);
	case BTRFS_IOC_CLONE:
		return btrfs_ioctl_clone(file, arg, 0, 0, 0);
	case BTRFS_IOC_CLONE_RANGE:
		return btrfs_ioctl_clone_range(file, argp);
	case BTRFS_IOC_TRANS_START:
		return btrfs_ioctl_trans_start(file);
	case BTRFS_IOC_TRANS_END:
		return btrfs_ioctl_trans_end(file);
	case BTRFS_IOC_TREE_SEARCH:
		return btrfs_ioctl_tree_search(file, argp);
	case BTRFS_IOC_TREE_SEARCH_V2:
		return btrfs_ioctl_tree_search_v2(file, argp);
	case BTRFS_IOC_INO_LOOKUP:
		return btrfs_ioctl_ino_lookup(file, argp);
	case BTRFS_IOC_INO_PATHS:
		return btrfs_ioctl_ino_to_path(root, argp);
	case BTRFS_IOC_LOGICAL_INO:
		return btrfs_ioctl_logical_to_ino(root, argp);
	case BTRFS_IOC_SPACE_INFO:
		return btrfs_ioctl_space_info(root, argp);
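
	/* Sync is handled inline rather than by a helper function. */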
	case BTRFS_IOC_SYNC: {
		int ret;

		ret = btrfs_start_delalloc_roots(root->fs_info, 0, -1);
		if (ret)
			return ret;
		ret = btrfs_sync_fs(file_inode(file)->i_sb, 1);
		/*
		 * The transaction thread may want to do more work, namely
		 * it pokes the cleaner kthread that will start processing
		 * uncleaned subvols.
		 */
		wake_up_process(root->fs_info->transaction_kthread);
		return ret;
	}
	case BTRFS_IOC_START_SYNC:
		return btrfs_ioctl_start_sync(root, argp);
	case BTRFS_IOC_WAIT_SYNC:
		return btrfs_ioctl_wait_sync(root, argp);
	case BTRFS_IOC_SCRUB:
		return btrfs_ioctl_scrub(file, argp);
	case BTRFS_IOC_SCRUB_CANCEL:
		return btrfs_ioctl_scrub_cancel(root, argp);
	case BTRFS_IOC_SCRUB_PROGRESS:
		return btrfs_ioctl_scrub_progress(root, argp);
	case BTRFS_IOC_BALANCE_V2:
		return btrfs_ioctl_balance(file, argp);
	case BTRFS_IOC_BALANCE_CTL:
		return btrfs_ioctl_balance_ctl(root, arg);
	case BTRFS_IOC_BALANCE_PROGRESS:
		return btrfs_ioctl_balance_progress(root, argp);
	case BTRFS_IOC_SET_RECEIVED_SUBVOL:
		return btrfs_ioctl_set_received_subvol(file, argp);
#ifdef CONFIG_64BIT
	case BTRFS_IOC_SET_RECEIVED_SUBVOL_32:
		return btrfs_ioctl_set_received_subvol_32(file, argp);
#endif
	case BTRFS_IOC_SEND:
		return btrfs_ioctl_send(file, argp);
	case BTRFS_IOC_GET_DEV_STATS:
		return btrfs_ioctl_get_dev_stats(root, argp);
	case BTRFS_IOC_QUOTA_CTL:
		return btrfs_ioctl_quota_ctl(file, argp);
	case BTRFS_IOC_QGROUP_ASSIGN:
		return btrfs_ioctl_qgroup_assign(file, argp);
	case BTRFS_IOC_QGROUP_CREATE:
		return btrfs_ioctl_qgroup_create(file, argp);
	case BTRFS_IOC_QGROUP_LIMIT:
		return btrfs_ioctl_qgroup_limit(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN:
		return btrfs_ioctl_quota_rescan(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN_STATUS:
		return btrfs_ioctl_quota_rescan_status(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN_WAIT:
		return btrfs_ioctl_quota_rescan_wait(file, argp);
	case BTRFS_IOC_DEV_REPLACE:
		return btrfs_ioctl_dev_replace(root, argp);
	case BTRFS_IOC_GET_FSLABEL:
		return btrfs_ioctl_get_fslabel(file, argp);
	case BTRFS_IOC_SET_FSLABEL:
		return btrfs_ioctl_set_fslabel(file, argp);
	case BTRFS_IOC_FILE_EXTENT_SAME:
		return btrfs_ioctl_file_extent_same(file, argp);
	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
		return btrfs_ioctl_get_supported_features(file, argp);
	case BTRFS_IOC_GET_FEATURES:
		return btrfs_ioctl_get_features(file, argp);
	case BTRFS_IOC_SET_FEATURES:
		return btrfs_ioctl_set_features(file, argp);
	}

	return -ENOTTY;
}