| /* |
| * Copyright (C) 2007 Oracle. All rights reserved. |
| * |
| * This program is free software; you can redistribute it and/or |
| * modify it under the terms of the GNU General Public |
| * License v2 as published by the Free Software Foundation. |
| * |
| * This program is distributed in the hope that it will be useful, |
| * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| * General Public License for more details. |
| * |
| * You should have received a copy of the GNU General Public |
| * License along with this program; if not, write to the |
| * Free Software Foundation, Inc., 59 Temple Place - Suite 330, |
| * Boston, MA 02111-1307, USA. |
| */ |
| |
| #include <linux/kernel.h> |
| #include <linux/bio.h> |
| #include <linux/buffer_head.h> |
| #include <linux/file.h> |
| #include <linux/fs.h> |
| #include <linux/fsnotify.h> |
| #include <linux/pagemap.h> |
| #include <linux/highmem.h> |
| #include <linux/time.h> |
| #include <linux/init.h> |
| #include <linux/string.h> |
| #include <linux/backing-dev.h> |
| #include <linux/mount.h> |
| #include <linux/mpage.h> |
| #include <linux/namei.h> |
| #include <linux/swap.h> |
| #include <linux/writeback.h> |
| #include <linux/statfs.h> |
| #include <linux/compat.h> |
| #include <linux/bit_spinlock.h> |
| #include <linux/security.h> |
| #include <linux/xattr.h> |
| #include <linux/vmalloc.h> |
| #include <linux/slab.h> |
| #include <linux/blkdev.h> |
| #include <linux/uuid.h> |
| #include <linux/btrfs.h> |
| #include <linux/uaccess.h> |
| #include "ctree.h" |
| #include "disk-io.h" |
| #include "transaction.h" |
| #include "btrfs_inode.h" |
| #include "print-tree.h" |
| #include "volumes.h" |
| #include "locking.h" |
| #include "inode-map.h" |
| #include "backref.h" |
| #include "rcu-string.h" |
| #include "send.h" |
| #include "dev-replace.h" |
| #include "props.h" |
| #include "sysfs.h" |
| #include "qgroup.h" |
| |
| #ifdef CONFIG_64BIT |
| /* |
| * If we have a 32-bit userspace and 64-bit kernel, then the UAPI |
| * structures are incorrect, as the timespec structure from userspace |
| * is 4 bytes too small. We define these alternatives here to teach |
| * the kernel about the 32-bit struct packing. |
| */ |
| struct btrfs_ioctl_timespec_32 { |
| __u64 sec; |
| __u32 nsec; |
| } __attribute__ ((__packed__)); |
| |
| struct btrfs_ioctl_received_subvol_args_32 { |
| char uuid[BTRFS_UUID_SIZE]; /* in */ |
| __u64 stransid; /* in */ |
| __u64 rtransid; /* out */ |
| struct btrfs_ioctl_timespec_32 stime; /* in */ |
| struct btrfs_ioctl_timespec_32 rtime; /* out */ |
| __u64 flags; /* in */ |
| __u64 reserved[16]; /* in */ |
| } __attribute__ ((__packed__)); |
| |
| #define BTRFS_IOC_SET_RECEIVED_SUBVOL_32 _IOWR(BTRFS_IOCTL_MAGIC, 37, \ |
| struct btrfs_ioctl_received_subvol_args_32) |
| #endif |
| |
| |
| static int btrfs_clone(struct inode *src, struct inode *inode, |
| u64 off, u64 olen, u64 olen_aligned, u64 destoff); |
| |
| /* Mask out flags that are inappropriate for the given type of inode. */ |
| static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) |
| { |
| if (S_ISDIR(mode)) |
| return flags; |
| else if (S_ISREG(mode)) |
| return flags & ~FS_DIRSYNC_FL; |
| else |
| return flags & (FS_NODUMP_FL | FS_NOATIME_FL); |
| } |
| |
| /* |
| * Export inode flags to the format expected by the FS_IOC_GETFLAGS ioctl. |
| */ |
| static unsigned int btrfs_flags_to_ioctl(unsigned int flags) |
| { |
| unsigned int iflags = 0; |
| |
| if (flags & BTRFS_INODE_SYNC) |
| iflags |= FS_SYNC_FL; |
| if (flags & BTRFS_INODE_IMMUTABLE) |
| iflags |= FS_IMMUTABLE_FL; |
| if (flags & BTRFS_INODE_APPEND) |
| iflags |= FS_APPEND_FL; |
| if (flags & BTRFS_INODE_NODUMP) |
| iflags |= FS_NODUMP_FL; |
| if (flags & BTRFS_INODE_NOATIME) |
| iflags |= FS_NOATIME_FL; |
| if (flags & BTRFS_INODE_DIRSYNC) |
| iflags |= FS_DIRSYNC_FL; |
| if (flags & BTRFS_INODE_NODATACOW) |
| iflags |= FS_NOCOW_FL; |
| |
| if ((flags & BTRFS_INODE_COMPRESS) && !(flags & BTRFS_INODE_NOCOMPRESS)) |
| iflags |= FS_COMPR_FL; |
| else if (flags & BTRFS_INODE_NOCOMPRESS) |
| iflags |= FS_NOCOMP_FL; |
| |
| return iflags; |
| } |
| |
| /* |
| * Update inode->i_flags based on the btrfs internal flags. |
| */ |
| void btrfs_update_iflags(struct inode *inode) |
| { |
| struct btrfs_inode *ip = BTRFS_I(inode); |
| unsigned int new_fl = 0; |
| |
| if (ip->flags & BTRFS_INODE_SYNC) |
| new_fl |= S_SYNC; |
| if (ip->flags & BTRFS_INODE_IMMUTABLE) |
| new_fl |= S_IMMUTABLE; |
| if (ip->flags & BTRFS_INODE_APPEND) |
| new_fl |= S_APPEND; |
| if (ip->flags & BTRFS_INODE_NOATIME) |
| new_fl |= S_NOATIME; |
| if (ip->flags & BTRFS_INODE_DIRSYNC) |
| new_fl |= S_DIRSYNC; |
| |
| set_mask_bits(&inode->i_flags, |
| S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC, |
| new_fl); |
| } |
| |
| /* |
| * Inherit flags from the parent inode. |
| * |
| * Currently only the compression flags and the cow flags are inherited. |
| */ |
| void btrfs_inherit_iflags(struct inode *inode, struct inode *dir) |
| { |
| unsigned int flags; |
| |
| if (!dir) |
| return; |
| |
| flags = BTRFS_I(dir)->flags; |
| |
| if (flags & BTRFS_INODE_NOCOMPRESS) { |
| BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS; |
| BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS; |
| } else if (flags & BTRFS_INODE_COMPRESS) { |
| BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS; |
| BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS; |
| } |
| |
| if (flags & BTRFS_INODE_NODATACOW) { |
| BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW; |
| if (S_ISREG(inode->i_mode)) |
| BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; |
| } |
| |
| btrfs_update_iflags(inode); |
| } |
| |
| static int btrfs_ioctl_getflags(struct file *file, void __user *arg) |
| { |
| struct btrfs_inode *ip = BTRFS_I(file_inode(file)); |
| unsigned int flags = btrfs_flags_to_ioctl(ip->flags); |
| |
| if (copy_to_user(arg, &flags, sizeof(flags))) |
| return -EFAULT; |
| return 0; |
| } |
| |
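| /* |
| * Check that the FS_IOC_SETFLAGS value contains only flags we support |
| * and that the compress/nocompress flags are not both set. |
| */ |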
| static int check_flags(unsigned int flags) |
| { |
| if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | |
| FS_NOATIME_FL | FS_NODUMP_FL | |
| FS_SYNC_FL | FS_DIRSYNC_FL | |
| FS_NOCOMP_FL | FS_COMPR_FL | |
| FS_NOCOW_FL)) |
| return -EOPNOTSUPP; |
| |
| if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL)) |
| return -EINVAL; |
| |
| return 0; |
| } |
| |
| static int btrfs_ioctl_setflags(struct file *file, void __user *arg) |
| { |
| struct inode *inode = file_inode(file); |
| struct btrfs_inode *ip = BTRFS_I(inode); |
| struct btrfs_root *root = ip->root; |
| struct btrfs_trans_handle *trans; |
| unsigned int flags, oldflags; |
| int ret; |
| u64 ip_oldflags; |
| unsigned int i_oldflags; |
| umode_t mode; |
| |
| if (!inode_owner_or_capable(inode)) |
| return -EPERM; |
| |
| if (btrfs_root_readonly(root)) |
| return -EROFS; |
| |
| if (copy_from_user(&flags, arg, sizeof(flags))) |
| return -EFAULT; |
| |
| ret = check_flags(flags); |
| if (ret) |
| return ret; |
| |
| ret = mnt_want_write_file(file); |
| if (ret) |
| return ret; |
| |
| mutex_lock(&inode->i_mutex); |
| |
| ip_oldflags = ip->flags; |
| i_oldflags = inode->i_flags; |
| mode = inode->i_mode; |
| |
| flags = btrfs_mask_flags(inode->i_mode, flags); |
| oldflags = btrfs_flags_to_ioctl(ip->flags); |
| if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) { |
| if (!capable(CAP_LINUX_IMMUTABLE)) { |
| ret = -EPERM; |
| goto out_unlock; |
| } |
| } |
| |
| if (flags & FS_SYNC_FL) |
| ip->flags |= BTRFS_INODE_SYNC; |
| else |
| ip->flags &= ~BTRFS_INODE_SYNC; |
| if (flags & FS_IMMUTABLE_FL) |
| ip->flags |= BTRFS_INODE_IMMUTABLE; |
| else |
| ip->flags &= ~BTRFS_INODE_IMMUTABLE; |
| if (flags & FS_APPEND_FL) |
| ip->flags |= BTRFS_INODE_APPEND; |
| else |
| ip->flags &= ~BTRFS_INODE_APPEND; |
| if (flags & FS_NODUMP_FL) |
| ip->flags |= BTRFS_INODE_NODUMP; |
| else |
| ip->flags &= ~BTRFS_INODE_NODUMP; |
| if (flags & FS_NOATIME_FL) |
| ip->flags |= BTRFS_INODE_NOATIME; |
| else |
| ip->flags &= ~BTRFS_INODE_NOATIME; |
| if (flags & FS_DIRSYNC_FL) |
| ip->flags |= BTRFS_INODE_DIRSYNC; |
| else |
| ip->flags &= ~BTRFS_INODE_DIRSYNC; |
| if (flags & FS_NOCOW_FL) { |
| if (S_ISREG(mode)) { |
| /* |
| * It's safe to turn csums off here because no extents |
| * exist yet. Otherwise we want the flag to reflect the |
| * real COW status of the file, so we don't set it. |
| */ |
| if (inode->i_size == 0) |
| ip->flags |= BTRFS_INODE_NODATACOW |
| | BTRFS_INODE_NODATASUM; |
| } else { |
| ip->flags |= BTRFS_INODE_NODATACOW; |
| } |
| } else { |
| /* |
| * Revert under the same assumptions as above |
| */ |
| if (S_ISREG(mode)) { |
| if (inode->i_size == 0) |
| ip->flags &= ~(BTRFS_INODE_NODATACOW |
| | BTRFS_INODE_NODATASUM); |
| } else { |
| ip->flags &= ~BTRFS_INODE_NODATACOW; |
| } |
| } |
| |
| /* |
| * The COMPRESS flag can only be changed by the user, while the |
| * NOCOMPRESS flag may be set automatically if the compression code |
| * decides it won't make things smaller. |
| */ |
| if (flags & FS_NOCOMP_FL) { |
| ip->flags &= ~BTRFS_INODE_COMPRESS; |
| ip->flags |= BTRFS_INODE_NOCOMPRESS; |
| |
| ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0); |
| if (ret && ret != -ENODATA) |
| goto out_drop; |
| } else if (flags & FS_COMPR_FL) { |
| const char *comp; |
| |
| ip->flags |= BTRFS_INODE_COMPRESS; |
| ip->flags &= ~BTRFS_INODE_NOCOMPRESS; |
| |
| if (root->fs_info->compress_type == BTRFS_COMPRESS_LZO) |
| comp = "lzo"; |
| else |
| comp = "zlib"; |
| ret = btrfs_set_prop(inode, "btrfs.compression", |
| comp, strlen(comp), 0); |
| if (ret) |
| goto out_drop; |
| |
| } else { |
| ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS); |
| } |
| |
| trans = btrfs_start_transaction(root, 1); |
| if (IS_ERR(trans)) { |
| ret = PTR_ERR(trans); |
| goto out_drop; |
| } |
| |
| btrfs_update_iflags(inode); |
| inode_inc_iversion(inode); |
| inode->i_ctime = CURRENT_TIME; |
| ret = btrfs_update_inode(trans, root, inode); |
| |
| btrfs_end_transaction(trans, root); |
| out_drop: |
| if (ret) { |
| ip->flags = ip_oldflags; |
| inode->i_flags = i_oldflags; |
| } |
| |
| out_unlock: |
| mutex_unlock(&inode->i_mutex); |
| mnt_drop_write_file(file); |
| return ret; |
| } |
| |
| static int btrfs_ioctl_getversion(struct file *file, int __user *arg) |
| { |
| struct inode *inode = file_inode(file); |
| |
| return put_user(inode->i_generation, arg); |
| } |
| |
| static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg) |
| { |
| struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb); |
| struct btrfs_device *device; |
| struct request_queue *q; |
| struct fstrim_range range; |
| u64 minlen = ULLONG_MAX; |
| u64 num_devices = 0; |
| u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy); |
| int ret; |
| |
| if (!capable(CAP_SYS_ADMIN)) |
| return -EPERM; |
| |
| rcu_read_lock(); |
| list_for_each_entry_rcu(device, &fs_info->fs_devices->devices, |
| dev_list) { |
| if (!device->bdev) |
| continue; |
| q = bdev_get_queue(device->bdev); |
| if (blk_queue_discard(q)) { |
| num_devices++; |
| minlen = min((u64)q->limits.discard_granularity, |
| minlen); |
| } |
| } |
| rcu_read_unlock(); |
| |
| if (!num_devices) |
| return -EOPNOTSUPP; |
| if (copy_from_user(&range, arg, sizeof(range))) |
| return -EFAULT; |
| if (range.start > total_bytes || |
| range.len < fs_info->sb->s_blocksize) |
| return -EINVAL; |
| |
| range.len = min(range.len, total_bytes - range.start); |
| range.minlen = max(range.minlen, minlen); |
| ret = btrfs_trim_fs(fs_info->tree_root, &range); |
| if (ret < 0) |
| return ret; |
| |
| if (copy_to_user(arg, &range, sizeof(range))) |
| return -EFAULT; |
| |
| return 0; |
| } |
| |
| int btrfs_is_empty_uuid(u8 *uuid) |
| { |
| int i; |
| |
| for (i = 0; i < BTRFS_UUID_SIZE; i++) { |
| if (uuid[i]) |
| return 0; |
| } |
| return 1; |
| } |
| |
| static noinline int create_subvol(struct inode *dir, |
| struct dentry *dentry, |
| char *name, int namelen, |
| u64 *async_transid, |
| struct btrfs_qgroup_inherit *inherit) |
| { |
| struct btrfs_trans_handle *trans; |
| struct btrfs_key key; |
| struct btrfs_root_item root_item; |
| struct btrfs_inode_item *inode_item; |
| struct extent_buffer *leaf; |
| struct btrfs_root *root = BTRFS_I(dir)->root; |
| struct btrfs_root *new_root; |
| struct btrfs_block_rsv block_rsv; |
| struct timespec cur_time = CURRENT_TIME; |
| struct inode *inode; |
| int ret; |
| int err; |
| u64 objectid; |
| u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID; |
| u64 index = 0; |
| u64 qgroup_reserved; |
| uuid_le new_uuid; |
| |
| ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid); |
| if (ret) |
| return ret; |
| |
| btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP); |
| /* |
| * The same as the snapshot creation, please see the comment |
| * of create_snapshot(). |
| */ |
| ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, |
| 8, &qgroup_reserved, false); |
| if (ret) |
| return ret; |
| |
| trans = btrfs_start_transaction(root, 0); |
| if (IS_ERR(trans)) { |
| ret = PTR_ERR(trans); |
| btrfs_subvolume_release_metadata(root, &block_rsv, |
| qgroup_reserved); |
| return ret; |
| } |
| trans->block_rsv = &block_rsv; |
| trans->bytes_reserved = block_rsv.size; |
| |
| ret = btrfs_qgroup_inherit(trans, root->fs_info, 0, objectid, inherit); |
| if (ret) |
| goto fail; |
| |
| leaf = btrfs_alloc_free_block(trans, root, root->leafsize, |
| 0, objectid, NULL, 0, 0, 0); |
| if (IS_ERR(leaf)) { |
| ret = PTR_ERR(leaf); |
| goto fail; |
| } |
| |
| memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header)); |
| btrfs_set_header_bytenr(leaf, leaf->start); |
| btrfs_set_header_generation(leaf, trans->transid); |
| btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV); |
| btrfs_set_header_owner(leaf, objectid); |
| |
| write_extent_buffer(leaf, root->fs_info->fsid, btrfs_header_fsid(), |
| BTRFS_FSID_SIZE); |
| write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid, |
| btrfs_header_chunk_tree_uuid(leaf), |
| BTRFS_UUID_SIZE); |
| btrfs_mark_buffer_dirty(leaf); |
| |
| memset(&root_item, 0, sizeof(root_item)); |
| |
| inode_item = &root_item.inode; |
| btrfs_set_stack_inode_generation(inode_item, 1); |
| btrfs_set_stack_inode_size(inode_item, 3); |
| btrfs_set_stack_inode_nlink(inode_item, 1); |
| btrfs_set_stack_inode_nbytes(inode_item, root->leafsize); |
| btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755); |
| |
| btrfs_set_root_flags(&root_item, 0); |
| btrfs_set_root_limit(&root_item, 0); |
| btrfs_set_stack_inode_flags(inode_item, BTRFS_INODE_ROOT_ITEM_INIT); |
| |
| btrfs_set_root_bytenr(&root_item, leaf->start); |
| btrfs_set_root_generation(&root_item, trans->transid); |
| btrfs_set_root_level(&root_item, 0); |
| btrfs_set_root_refs(&root_item, 1); |
| btrfs_set_root_used(&root_item, leaf->len); |
| btrfs_set_root_last_snapshot(&root_item, 0); |
| |
| btrfs_set_root_generation_v2(&root_item, |
| btrfs_root_generation(&root_item)); |
| uuid_le_gen(&new_uuid); |
| memcpy(root_item.uuid, new_uuid.b, BTRFS_UUID_SIZE); |
| btrfs_set_stack_timespec_sec(&root_item.otime, cur_time.tv_sec); |
| btrfs_set_stack_timespec_nsec(&root_item.otime, cur_time.tv_nsec); |
| root_item.ctime = root_item.otime; |
| btrfs_set_root_ctransid(&root_item, trans->transid); |
| btrfs_set_root_otransid(&root_item, trans->transid); |
| |
| btrfs_tree_unlock(leaf); |
| free_extent_buffer(leaf); |
| leaf = NULL; |
| |
| btrfs_set_root_dirid(&root_item, new_dirid); |
| |
| key.objectid = objectid; |
| key.offset = 0; |
| btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY); |
| ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key, |
| &root_item); |
| if (ret) |
| goto fail; |
| |
| key.offset = (u64)-1; |
| new_root = btrfs_read_fs_root_no_name(root->fs_info, &key); |
| if (IS_ERR(new_root)) { |
| btrfs_abort_transaction(trans, root, PTR_ERR(new_root)); |
| ret = PTR_ERR(new_root); |
| goto fail; |
| } |
| |
| btrfs_record_root_in_trans(trans, new_root); |
| |
| ret = btrfs_create_subvol_root(trans, new_root, root, new_dirid); |
| if (ret) { |
| /* We potentially lose an unused inode item here */ |
| btrfs_abort_transaction(trans, root, ret); |
| goto fail; |
| } |
| |
| /* |
| * insert the directory item |
| */ |
| ret = btrfs_set_inode_index(dir, &index); |
| if (ret) { |
| btrfs_abort_transaction(trans, root, ret); |
| goto fail; |
| } |
| |
| ret = btrfs_insert_dir_item(trans, root, |
| name, namelen, dir, &key, |
| BTRFS_FT_DIR, index); |
| if (ret) { |
| btrfs_abort_transaction(trans, root, ret); |
| goto fail; |
| } |
| |
| btrfs_i_size_write(dir, dir->i_size + namelen * 2); |
| ret = btrfs_update_inode(trans, root, dir); |
| BUG_ON(ret); |
| |
| ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, |
| objectid, root->root_key.objectid, |
| btrfs_ino(dir), index, name, namelen); |
| BUG_ON(ret); |
| |
| ret = btrfs_uuid_tree_add(trans, root->fs_info->uuid_root, |
| root_item.uuid, BTRFS_UUID_KEY_SUBVOL, |
| objectid); |
| if (ret) |
| btrfs_abort_transaction(trans, root, ret); |
| |
| fail: |
| trans->block_rsv = NULL; |
| trans->bytes_reserved = 0; |
| btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved); |
| |
| if (async_transid) { |
| *async_transid = trans->transid; |
| err = btrfs_commit_transaction_async(trans, root, 1); |
| if (err) |
| err = btrfs_commit_transaction(trans, root); |
| } else { |
| err = btrfs_commit_transaction(trans, root); |
| } |
| if (err && !ret) |
| ret = err; |
| |
| if (!ret) { |
| inode = btrfs_lookup_dentry(dir, dentry); |
| if (IS_ERR(inode)) |
| return PTR_ERR(inode); |
| d_instantiate(dentry, inode); |
| } |
| return ret; |
| } |
| |
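| /* |
| * Wait until all nocow writers on this subvolume have finished, so that |
| * a snapshot taken afterwards sees fully written extents. |
| */ |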
| static void btrfs_wait_nocow_write(struct btrfs_root *root) |
| { |
| s64 writers; |
| DEFINE_WAIT(wait); |
| |
| do { |
| prepare_to_wait(&root->subv_writers->wait, &wait, |
| TASK_UNINTERRUPTIBLE); |
| |
| writers = percpu_counter_sum(&root->subv_writers->counter); |
| if (writers) |
| schedule(); |
| |
| finish_wait(&root->subv_writers->wait, &wait); |
| } while (writers); |
| } |
| |
| static int create_snapshot(struct btrfs_root *root, struct inode *dir, |
| struct dentry *dentry, char *name, int namelen, |
| u64 *async_transid, bool readonly, |
| struct btrfs_qgroup_inherit *inherit) |
| { |
| struct inode *inode; |
| struct btrfs_pending_snapshot *pending_snapshot; |
| struct btrfs_trans_handle *trans; |
| int ret; |
| |
| if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) |
| return -EINVAL; |
| |
| atomic_inc(&root->will_be_snapshoted); |
| smp_mb__after_atomic_inc(); |
| btrfs_wait_nocow_write(root); |
| |
| ret = btrfs_start_delalloc_inodes(root, 0); |
| if (ret) |
| goto out; |
| |
| btrfs_wait_ordered_extents(root, -1); |
| |
| pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS); |
| if (!pending_snapshot) { |
| ret = -ENOMEM; |
| goto out; |
| } |
| |
| btrfs_init_block_rsv(&pending_snapshot->block_rsv, |
| BTRFS_BLOCK_RSV_TEMP); |
| /* |
| * 1 - parent dir inode |
| * 2 - dir entries |
| * 1 - root item |
| * 2 - root ref/backref |
| * 1 - root of snapshot |
| * 1 - UUID item |
| */ |
| ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root, |
| &pending_snapshot->block_rsv, 8, |
| &pending_snapshot->qgroup_reserved, |
| false); |
| if (ret) |
| goto free; |
| |
| pending_snapshot->dentry = dentry; |
| pending_snapshot->root = root; |
| pending_snapshot->readonly = readonly; |
| pending_snapshot->dir = dir; |
| pending_snapshot->inherit = inherit; |
| |
| trans = btrfs_start_transaction(root, 0); |
| if (IS_ERR(trans)) { |
| ret = PTR_ERR(trans); |
| goto fail; |
| } |
| |
| spin_lock(&root->fs_info->trans_lock); |
| list_add(&pending_snapshot->list, |
| &trans->transaction->pending_snapshots); |
| spin_unlock(&root->fs_info->trans_lock); |
| if (async_transid) { |
| *async_transid = trans->transid; |
| ret = btrfs_commit_transaction_async(trans, |
| root->fs_info->extent_root, 1); |
| if (ret) |
| ret = btrfs_commit_transaction(trans, root); |
| } else { |
| ret = btrfs_commit_transaction(trans, |
| root->fs_info->extent_root); |
| } |
| if (ret) |
| goto fail; |
| |
| ret = pending_snapshot->error; |
| if (ret) |
| goto fail; |
| |
| ret = btrfs_orphan_cleanup(pending_snapshot->snap); |
| if (ret) |
| goto fail; |
| |
| /* |
| * If orphan cleanup did remove any orphans, it means the tree was |
| * modified and therefore the commit root is not the same as the |
| * current root anymore. This is a problem, because send uses the |
| * commit root and therefore can see inode items that don't exist |
| * in the current root anymore, and for example make calls to |
| * btrfs_iget, which will do tree lookups based on the current root |
| * and not on the commit root. Those lookups will fail, returning a |
| * -ESTALE error, and making send fail with that error. So make sure |
| * a send does not see any orphans we have just removed, and that it |
| * will see the same inodes regardless of whether a transaction |
| * commit happened before it started (meaning that the commit root |
| * will be the same as the current root) or not. |
| */ |
| if (readonly && pending_snapshot->snap->node != |
| pending_snapshot->snap->commit_root) { |
| trans = btrfs_join_transaction(pending_snapshot->snap); |
| if (IS_ERR(trans) && PTR_ERR(trans) != -ENOENT) { |
| ret = PTR_ERR(trans); |
| goto fail; |
| } |
| if (!IS_ERR(trans)) { |
| ret = btrfs_commit_transaction(trans, |
| pending_snapshot->snap); |
| if (ret) |
| goto fail; |
| } |
| } |
| |
| inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry); |
| if (IS_ERR(inode)) { |
| ret = PTR_ERR(inode); |
| goto fail; |
| } |
| |
| d_instantiate(dentry, inode); |
| ret = 0; |
| fail: |
| btrfs_subvolume_release_metadata(BTRFS_I(dir)->root, |
| &pending_snapshot->block_rsv, |
| pending_snapshot->qgroup_reserved); |
| free: |
| kfree(pending_snapshot); |
| out: |
| atomic_dec(&root->will_be_snapshoted); |
| return ret; |
| } |
| |
| /* copy of check_sticky() in fs/namei.c |
| * It's inline, so the penalty for filesystems that don't use the |
| * sticky bit is minimal. |
| */ |
| static inline int btrfs_check_sticky(struct inode *dir, struct inode *inode) |
| { |
| kuid_t fsuid = current_fsuid(); |
| |
| if (!(dir->i_mode & S_ISVTX)) |
| return 0; |
| if (uid_eq(inode->i_uid, fsuid)) |
| return 0; |
| if (uid_eq(dir->i_uid, fsuid)) |
| return 0; |
| return !capable(CAP_FOWNER); |
| } |
| |
| /* copy of may_delete() in fs/namei.c |
| * Check whether we can remove a link victim from directory dir, check |
| * whether the type of victim is right. |
| * 1. We can't do it if dir is read-only (done in permission()) |
| * 2. We should have write and exec permissions on dir |
| * 3. We can't remove anything from append-only dir |
| * 4. We can't do anything with immutable dir (done in permission()) |
| * 5. If the sticky bit on dir is set we should either |
| * a. be owner of dir, or |
| * b. be owner of victim, or |
| * c. have CAP_FOWNER capability |
| * 6. If the victim is append-only or immutable we can't do anything with |
| * links pointing to it. |
| * 7. If we were asked to remove a directory and victim isn't one - ENOTDIR. |
| * 8. If we were asked to remove a non-directory and victim isn't one - EISDIR. |
| * 9. We can't remove a root or mountpoint. |
| * 10. We don't allow removal of NFS sillyrenamed files; it's handled by |
| * nfs_async_unlink(). |
| */ |
| |
| static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir) |
| { |
| int error; |
| |
| if (!victim->d_inode) |
| return -ENOENT; |
| |
| BUG_ON(victim->d_parent->d_inode != dir); |
| audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE); |
| |
| error = inode_permission(dir, MAY_WRITE | MAY_EXEC); |
| if (error) |
| return error; |
| if (IS_APPEND(dir)) |
| return -EPERM; |
| if (btrfs_check_sticky(dir, victim->d_inode)|| |
| IS_APPEND(victim->d_inode)|| |
| IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode)) |
| return -EPERM; |
| if (isdir) { |
| if (!S_ISDIR(victim->d_inode->i_mode)) |
| return -ENOTDIR; |
| if (IS_ROOT(victim)) |
| return -EBUSY; |
| } else if (S_ISDIR(victim->d_inode->i_mode)) |
| return -EISDIR; |
| if (IS_DEADDIR(dir)) |
| return -ENOENT; |
| if (victim->d_flags & DCACHE_NFSFS_RENAMED) |
| return -EBUSY; |
| return 0; |
| } |
| |
| /* copy of may_create in fs/namei.c() */ |
| static inline int btrfs_may_create(struct inode *dir, struct dentry *child) |
| { |
| if (child->d_inode) |
| return -EEXIST; |
| if (IS_DEADDIR(dir)) |
| return -ENOENT; |
| return inode_permission(dir, MAY_WRITE | MAY_EXEC); |
| } |
| |
| /* |
| * Create a new subvolume below @parent. This is largely modeled after |
| * sys_mkdirat and vfs_mkdir, but we only do a single component lookup |
| * inside this filesystem so it's quite a bit simpler. |
| */ |
| static noinline int btrfs_mksubvol(struct path *parent, |
| char *name, int namelen, |
| struct btrfs_root *snap_src, |
| u64 *async_transid, bool readonly, |
| struct btrfs_qgroup_inherit *inherit) |
| { |
| struct inode *dir = parent->dentry->d_inode; |
| struct dentry *dentry; |
| int error; |
| |
| error = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT); |
| if (error == -EINTR) |
| return error; |
| |
| dentry = lookup_one_len(name, parent->dentry, namelen); |
| error = PTR_ERR(dentry); |
| if (IS_ERR(dentry)) |
| goto out_unlock; |
| |
| error = -EEXIST; |
| if (dentry->d_inode) |
| goto out_dput; |
| |
| error = btrfs_may_create(dir, dentry); |
| if (error) |
| goto out_dput; |
| |
| /* |
| * even if this name doesn't exist, we may get hash collisions. |
| * check for them now when we can safely fail |
| */ |
| error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root, |
| dir->i_ino, name, |
| namelen); |
| if (error) |
| goto out_dput; |
| |
| down_read(&BTRFS_I(dir)->root->fs_info->subvol_sem); |
| |
| if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0) |
| goto out_up_read; |
| |
| if (snap_src) { |
| error = create_snapshot(snap_src, dir, dentry, name, namelen, |
| async_transid, readonly, inherit); |
| } else { |
| error = create_subvol(dir, dentry, name, namelen, |
| async_transid, inherit); |
| } |
| if (!error) |
| fsnotify_mkdir(dir, dentry); |
| out_up_read: |
| up_read(&BTRFS_I(dir)->root->fs_info->subvol_sem); |
| out_dput: |
| dput(dentry); |
| out_unlock: |
| mutex_unlock(&dir->i_mutex); |
| return error; |
| } |
| |
| /* |
| * When we're defragging a range, we don't want to kick it off again |
| * if it is really just waiting for delalloc to send it down. |
| * If we find a nice big extent or delalloc range covering the bytes |
| * you want to defrag, we return 0 to tell the caller to skip this |
| * part of the file. |
| */ |
| static int check_defrag_in_cache(struct inode *inode, u64 offset, int thresh) |
| { |
| struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
| struct extent_map *em = NULL; |
| struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
| u64 end; |
| |
| read_lock(&em_tree->lock); |
| em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE); |
| read_unlock(&em_tree->lock); |
| |
| if (em) { |
| end = extent_map_end(em); |
| free_extent_map(em); |
| if (end - offset > thresh) |
| return 0; |
| } |
| /* if we already have a nice delalloc here, just stop */ |
| thresh /= 2; |
| end = count_range_bits(io_tree, &offset, offset + thresh, |
| thresh, EXTENT_DELALLOC, 1); |
| if (end >= thresh) |
| return 0; |
| return 1; |
| } |
| |
| /* |
| * helper function to walk through a file and find extents |
| * newer than a specific transid, and smaller than thresh. |
| * |
| * This is used by the defragging code to find new and small |
| * extents. |
| */ |
| static int find_new_extents(struct btrfs_root *root, |
| struct inode *inode, u64 newer_than, |
| u64 *off, int thresh) |
| { |
| struct btrfs_path *path; |
| struct btrfs_key min_key; |
| struct extent_buffer *leaf; |
| struct btrfs_file_extent_item *extent; |
| int type; |
| int ret; |
| u64 ino = btrfs_ino(inode); |
| |
| path = btrfs_alloc_path(); |
| if (!path) |
| return -ENOMEM; |
| |
| min_key.objectid = ino; |
| min_key.type = BTRFS_EXTENT_DATA_KEY; |
| min_key.offset = *off; |
| |
| while (1) { |
| path->keep_locks = 1; |
| ret = btrfs_search_forward(root, &min_key, path, newer_than); |
| if (ret != 0) |
| goto none; |
| path->keep_locks = 0; |
| btrfs_unlock_up_safe(path, 1); |
| process_slot: |
| if (min_key.objectid != ino) |
| goto none; |
| if (min_key.type != BTRFS_EXTENT_DATA_KEY) |
| goto none; |
| |
| leaf = path->nodes[0]; |
| extent = btrfs_item_ptr(leaf, path->slots[0], |
| struct btrfs_file_extent_item); |
| |
| type = btrfs_file_extent_type(leaf, extent); |
| if (type == BTRFS_FILE_EXTENT_REG && |
| btrfs_file_extent_num_bytes(leaf, extent) < thresh && |
| check_defrag_in_cache(inode, min_key.offset, thresh)) { |
| *off = min_key.offset; |
| btrfs_free_path(path); |
| return 0; |
| } |
| |
| path->slots[0]++; |
| if (path->slots[0] < btrfs_header_nritems(leaf)) { |
| btrfs_item_key_to_cpu(leaf, &min_key, path->slots[0]); |
| goto process_slot; |
| } |
| |
| if (min_key.offset == (u64)-1) |
| goto none; |
| |
| min_key.offset++; |
| btrfs_release_path(path); |
| } |
| none: |
| btrfs_free_path(path); |
| return -ENOENT; |
| } |
| |
| static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start) |
| { |
| struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
| struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
| struct extent_map *em; |
| u64 len = PAGE_CACHE_SIZE; |
| |
| /* |
| * hopefully we have this extent in the tree already, try without |
| * the full extent lock |
| */ |
| read_lock(&em_tree->lock); |
| em = lookup_extent_mapping(em_tree, start, len); |
| read_unlock(&em_tree->lock); |
| |
| if (!em) { |
| struct extent_state *cached = NULL; |
| u64 end = start + len - 1; |
| |
| /* get the big lock and read metadata off disk */ |
| lock_extent_bits(io_tree, start, end, 0, &cached); |
| em = btrfs_get_extent(inode, NULL, 0, start, len, 0); |
| unlock_extent_cached(io_tree, start, end, &cached, GFP_NOFS); |
| |
| if (IS_ERR(em)) |
| return NULL; |
| } |
| |
| return em; |
| } |
| |
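| /* |
| * Return true if the extent following @em is a real extent that is not |
| * physically contiguous with it, i.e. defragging could merge the two. |
| */ |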
| static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em) |
| { |
| struct extent_map *next; |
| bool ret = true; |
| |
| /* this is the last extent */ |
| if (em->start + em->len >= i_size_read(inode)) |
| return false; |
| |
| next = defrag_lookup_extent(inode, em->start + em->len); |
| if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE || |
| (em->block_start + em->block_len == next->block_start)) |
| ret = false; |
| |
| free_extent_map(next); |
| return ret; |
| } |
| |
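| /* |
| * Decide if the extent at @start should be defragged. Returns 1 if it |
| * should. When we skip an extent, *skip is set to its end so the caller |
| * can jump past it; *defrag_end tracks how far we have committed to |
| * defragging. |
| */ |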
| static int should_defrag_range(struct inode *inode, u64 start, int thresh, |
| u64 *last_len, u64 *skip, u64 *defrag_end, |
| int compress) |
| { |
| struct extent_map *em; |
| int ret = 1; |
| bool next_mergeable = true; |
| |
| /* |
| * make sure that once we start defragging an extent, we keep on |
| * defragging it |
| */ |
| if (start < *defrag_end) |
| return 1; |
| |
| *skip = 0; |
| |
| em = defrag_lookup_extent(inode, start); |
| if (!em) |
| return 0; |
| |
| /* this will cover holes, and inline extents */ |
| if (em->block_start >= EXTENT_MAP_LAST_BYTE) { |
| ret = 0; |
| goto out; |
| } |
| |
| next_mergeable = defrag_check_next_extent(inode, em); |
| |
| /* |
| * We hit a real extent. If it is big, or merging it with the next |
| * extent won't help, don't bother defragging it. |
| */ |
| if (!compress && (*last_len == 0 || *last_len >= thresh) && |
| (em->len >= thresh || !next_mergeable)) |
| ret = 0; |
| out: |
| /* |
| * last_len ends up being a counter of how many bytes we've defragged. |
| * every time we choose not to defrag an extent, we reset *last_len |
| * so that the next tiny extent will force a defrag. |
| * |
| * The end result of this is that tiny extents before a single big |
| * extent will force at least part of that big extent to be defragged. |
| */ |
| if (ret) { |
| *defrag_end = extent_map_end(em); |
| } else { |
| *last_len = 0; |
| *skip = extent_map_end(em); |
| *defrag_end = 0; |
| } |
| |
| free_extent_map(em); |
| return ret; |
| } |
| |
| /* |
| * it doesn't do much good to defrag one or two pages |
| * at a time. This pulls in a nice chunk of pages |
| * to COW and defrag. |
| * |
| * It also makes sure the delalloc code has enough |
| * dirty data to avoid making new small extents as part |
| * of the defrag. |
| * |
| * It's a good idea to start RA on this range |
| * before calling this. |
| */ |
| static int cluster_pages_for_defrag(struct inode *inode, |
| struct page **pages, |
| unsigned long start_index, |
| unsigned long num_pages) |
| { |
| unsigned long file_end; |
| u64 isize = i_size_read(inode); |
| u64 page_start; |
| u64 page_end; |
| u64 page_cnt; |
| int ret; |
| int i; |
| int i_done; |
| struct btrfs_ordered_extent *ordered; |
| struct extent_state *cached_state = NULL; |
| struct extent_io_tree *tree; |
| gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); |
| |
| file_end = (isize - 1) >> PAGE_CACHE_SHIFT; |
| if (!isize || start_index > file_end) |
| return 0; |
| |
| page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1); |
| |
| ret = btrfs_delalloc_reserve_space(inode, |
| page_cnt << PAGE_CACHE_SHIFT); |
| if (ret) |
| return ret; |
| i_done = 0; |
| tree = &BTRFS_I(inode)->io_tree; |
| |
| /* step one, lock all the pages */ |
| for (i = 0; i < page_cnt; i++) { |
| struct page *page; |
| again: |
| page = find_or_create_page(inode->i_mapping, |
| start_index + i, mask); |
| if (!page) |
| break; |
| |
| page_start = page_offset(page); |
| page_end = page_start + PAGE_CACHE_SIZE - 1; |
| while (1) { |
| lock_extent_bits(tree, page_start, page_end, |
| 0, &cached_state); |
| ordered = btrfs_lookup_ordered_extent(inode, |
| page_start); |
| unlock_extent_cached(tree, page_start, page_end, |
| &cached_state, GFP_NOFS); |
| if (!ordered) |
| break; |
| |
| unlock_page(page); |
| btrfs_start_ordered_extent(inode, ordered, 1); |
| btrfs_put_ordered_extent(ordered); |
| lock_page(page); |
| /* |
| * we unlocked the page above, so we need to check if |
| * it was released or not. |
| */ |
| if (page->mapping != inode->i_mapping) { |
| unlock_page(page); |
| page_cache_release(page); |
| goto again; |
| } |
| } |
| |
| if (!PageUptodate(page)) { |
| btrfs_readpage(NULL, page); |
| lock_page(page); |
| if (!PageUptodate(page)) { |
| unlock_page(page); |
| page_cache_release(page); |
| ret = -EIO; |
| break; |
| } |
| } |
| |
| if (page->mapping != inode->i_mapping) { |
| unlock_page(page); |
| page_cache_release(page); |
| goto again; |
| } |
| |
| pages[i] = page; |
| i_done++; |
| } |
| if (!i_done || ret) |
| goto out; |
| |
| if (!(inode->i_sb->s_flags & MS_ACTIVE)) |
| goto out; |
| |
| /* |
| * so now we have a nice long stream of locked |
| * and up-to-date pages, let's wait on them |
| */ |
| for (i = 0; i < i_done; i++) |
| wait_on_page_writeback(pages[i]); |
| |
| page_start = page_offset(pages[0]); |
| page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE; |
| |
| lock_extent_bits(&BTRFS_I(inode)->io_tree, |
| page_start, page_end - 1, 0, &cached_state); |
| clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, |
| page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC | |
| EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0, |
| &cached_state, GFP_NOFS); |
| |
| if (i_done != page_cnt) { |
| spin_lock(&BTRFS_I(inode)->lock); |
| BTRFS_I(inode)->outstanding_extents++; |
| spin_unlock(&BTRFS_I(inode)->lock); |
| btrfs_delalloc_release_space(inode, |
| (page_cnt - i_done) << PAGE_CACHE_SHIFT); |
| } |
| |
| |
| set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1, |
| &cached_state, GFP_NOFS); |
| |
| unlock_extent_cached(&BTRFS_I(inode)->io_tree, |
| page_start, page_end - 1, &cached_state, |
| GFP_NOFS); |
| |
| for (i = 0; i < i_done; i++) { |
| clear_page_dirty_for_io(pages[i]); |
| ClearPageChecked(pages[i]); |
| set_page_extent_mapped(pages[i]); |
| set_page_dirty(pages[i]); |
| unlock_page(pages[i]); |
| page_cache_release(pages[i]); |
| } |
| return i_done; |
| out: |
| for (i = 0; i < i_done; i++) { |
| unlock_page(pages[i]); |
| page_cache_release(pages[i]); |
| } |
| btrfs_delalloc_release_space(inode, page_cnt << PAGE_CACHE_SHIFT); |
| return ret; |
| |
| } |
| |
| int btrfs_defrag_file(struct inode *inode, struct file *file, |
| struct btrfs_ioctl_defrag_range_args *range, |
| u64 newer_than, unsigned long max_to_defrag) |
| { |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| struct file_ra_state *ra = NULL; |
| unsigned long last_index; |
| u64 isize = i_size_read(inode); |
| u64 last_len = 0; |
| u64 skip = 0; |
| u64 defrag_end = 0; |
| u64 newer_off = range->start; |
| unsigned long i; |
| unsigned long ra_index = 0; |
| int ret; |
| int defrag_count = 0; |
| int compress_type = BTRFS_COMPRESS_ZLIB; |
| int extent_thresh = range->extent_thresh; |
| unsigned long max_cluster = (256 * 1024) >> PAGE_CACHE_SHIFT; |
| unsigned long cluster = max_cluster; |
| u64 new_align = ~((u64)128 * 1024 - 1); |
| struct page **pages = NULL; |
| |
| if (isize == 0) |
| return 0; |
| |
| if (range->start >= isize) |
| return -EINVAL; |
| |
| if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) { |
| if (range->compress_type > BTRFS_COMPRESS_TYPES) |
| return -EINVAL; |
| if (range->compress_type) |
| compress_type = range->compress_type; |
| } |
| |
| if (extent_thresh == 0) |
| extent_thresh = 256 * 1024; |
| |
| /* |
| * if we were not given a file, allocate a readahead |
| * context |
| */ |
| if (!file) { |
| ra = kzalloc(sizeof(*ra), GFP_NOFS); |
| if (!ra) |
| return -ENOMEM; |
| file_ra_state_init(ra, inode->i_mapping); |
| } else { |
| ra = &file->f_ra; |
| } |
| |
| pages = kmalloc_array(max_cluster, sizeof(struct page *), |
| GFP_NOFS); |
| if (!pages) { |
| ret = -ENOMEM; |
| goto out_ra; |
| } |
| |
| /* find the last page to defrag */ |
| if (range->start + range->len > range->start) { |
| last_index = min_t(u64, isize - 1, |
| range->start + range->len - 1) >> PAGE_CACHE_SHIFT; |
| } else { |
| last_index = (isize - 1) >> PAGE_CACHE_SHIFT; |
| } |
| |
| if (newer_than) { |
| ret = find_new_extents(root, inode, newer_than, |
| &newer_off, 64 * 1024); |
| if (!ret) { |
| range->start = newer_off; |
| /* |
| * we always align our defrag to help keep |
| * the extents in the file evenly spaced |
| */ |
| i = (newer_off & new_align) >> PAGE_CACHE_SHIFT; |
| } else |
| goto out_ra; |
| } else { |
| i = range->start >> PAGE_CACHE_SHIFT; |
| } |
| if (!max_to_defrag) |
| max_to_defrag = last_index + 1; |
| |
| /* |
| * make writeback start from i, so the defrag range can be |
| * written sequentially. |
| */ |
| if (i < inode->i_mapping->writeback_index) |
| inode->i_mapping->writeback_index = i; |
| |
| while (i <= last_index && defrag_count < max_to_defrag && |
| (i < (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> |
| PAGE_CACHE_SHIFT)) { |
| /* |
| * make sure we stop running if someone unmounts |
| * the FS |
| */ |
| if (!(inode->i_sb->s_flags & MS_ACTIVE)) |
| break; |
| |
| if (btrfs_defrag_cancelled(root->fs_info)) { |
| printk(KERN_DEBUG "BTRFS: defrag_file cancelled\n"); |
| ret = -EAGAIN; |
| break; |
| } |
| |
| if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, |
| extent_thresh, &last_len, &skip, |
| &defrag_end, range->flags & |
| BTRFS_DEFRAG_RANGE_COMPRESS)) { |
| unsigned long next; |
| /* |
| * the should_defrag function tells us how much to skip, so |
| * bump our counter by the suggested amount |
| */ |
| next = (skip + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; |
| i = max(i + 1, next); |
| continue; |
| } |
| |
| if (!newer_than) { |
| cluster = (PAGE_CACHE_ALIGN(defrag_end) >> |
| PAGE_CACHE_SHIFT) - i; |
| cluster = min(cluster, max_cluster); |
| } else { |
| cluster = max_cluster; |
| } |
| |
| if (i + cluster > ra_index) { |
| ra_index = max(i, ra_index); |
| btrfs_force_ra(inode->i_mapping, ra, file, ra_index, |
| cluster); |
| ra_index += max_cluster; |
| } |
| |
| mutex_lock(&inode->i_mutex); |
| if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) |
| BTRFS_I(inode)->force_compress = compress_type; |
| ret = cluster_pages_for_defrag(inode, pages, i, cluster); |
| if (ret < 0) { |
| mutex_unlock(&inode->i_mutex); |
| goto out_ra; |
| } |
| |
| defrag_count += ret; |
| balance_dirty_pages_ratelimited(inode->i_mapping); |
| mutex_unlock(&inode->i_mutex); |
| |
| if (newer_than) { |
| if (newer_off == (u64)-1) |
| break; |
| |
| if (ret > 0) |
| i += ret; |
| |
| newer_off = max(newer_off + 1, |
| (u64)i << PAGE_CACHE_SHIFT); |
| |
| ret = find_new_extents(root, inode, |
| newer_than, &newer_off, |
| 64 * 1024); |
| if (!ret) { |
| range->start = newer_off; |
| i = (newer_off & new_align) >> PAGE_CACHE_SHIFT; |
| } else { |
| break; |
| } |
| } else { |
| if (ret > 0) { |
| i += ret; |
| last_len += ret << PAGE_CACHE_SHIFT; |
| } else { |
| i++; |
| last_len = 0; |
| } |
| } |
| } |
| |
| if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) { |
| filemap_flush(inode->i_mapping); |
| if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, |
| &BTRFS_I(inode)->runtime_flags)) |
| filemap_flush(inode->i_mapping); |
| } |
| |
| if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) { |
| /* the filemap_flush will queue IO into the worker threads, but |
| * we have to make sure the IO is actually started and that |
| * ordered extents get created before we return |
| */ |
| atomic_inc(&root->fs_info->async_submit_draining); |
| while (atomic_read(&root->fs_info->nr_async_submits) || |
| atomic_read(&root->fs_info->async_delalloc_pages)) { |
| wait_event(root->fs_info->async_submit_wait, |
| (atomic_read(&root->fs_info->nr_async_submits) == 0 && |
| atomic_read(&root->fs_info->async_delalloc_pages) == 0)); |
| } |
| atomic_dec(&root->fs_info->async_submit_draining); |
| } |
| |
| if (range->compress_type == BTRFS_COMPRESS_LZO) |
| btrfs_set_fs_incompat(root->fs_info, COMPRESS_LZO); |
| |
| ret = defrag_count; |
| |
| out_ra: |
| if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) { |
| mutex_lock(&inode->i_mutex); |
| BTRFS_I(inode)->force_compress = BTRFS_COMPRESS_NONE; |
| mutex_unlock(&inode->i_mutex); |
| } |
| if (!file) |
| kfree(ra); |
| kfree(pages); |
| return ret; |
| } |
| |
| static noinline int btrfs_ioctl_resize(struct file *file, |
| void __user *arg) |
| { |
| u64 new_size; |
| u64 old_size; |
| u64 devid = 1; |
| struct btrfs_root *root = BTRFS_I(file_inode(file))->root; |
| struct btrfs_ioctl_vol_args *vol_args; |
| struct btrfs_trans_handle *trans; |
| struct btrfs_device *device = NULL; |
| char *sizestr; |
| char *retptr; |
| char *devstr = NULL; |
| int ret = 0; |
| int mod = 0; |
| |
| if (!capable(CAP_SYS_ADMIN)) |
| return -EPERM; |
| |
| ret = mnt_want_write_file(file); |
| if (ret) |
| return ret; |
| |
| if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running, |
| 1)) { |
| mnt_drop_write_file(file); |
| return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; |
| } |
| |
| mutex_lock(&root->fs_info->volume_mutex); |
| vol_args = memdup_user(arg, sizeof(*vol_args)); |
| if (IS_ERR(vol_args)) { |
| ret = PTR_ERR(vol_args); |
| goto out; |
| } |
| |
| vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; |
| |
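| /* |
| * The argument is "<size>" or "<devid>:<size>". The size may be "max", |
| * or be prefixed with '+' or '-' to grow or shrink the device by the |
| * given amount. |
| */ |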
| sizestr = vol_args->name; |
| devstr = strchr(sizestr, ':'); |
| if (devstr) { |
| sizestr = devstr + 1; |
| *devstr = '\0'; |
| devstr = vol_args->name; |
| ret = kstrtoull(devstr, 10, &devid); |
| if (ret) |
| goto out_free; |
| if (!devid) { |
| ret = -EINVAL; |
| goto out_free; |
| } |
| btrfs_info(root->fs_info, "resizing devid %llu", devid); |
| } |
| |
| device = btrfs_find_device(root->fs_info, devid, NULL, NULL); |
| if (!device) { |
| btrfs_info(root->fs_info, "resizer unable to find device %llu", |
| devid); |
| ret = -ENODEV; |
| goto out_free; |
| } |
| |
| if (!device->writeable) { |
| btrfs_info(root->fs_info, |
| "resizer unable to apply on readonly device %llu", |
| devid); |
| ret = -EPERM; |
| goto out_free; |
| } |
| |
| if (!strcmp(sizestr, "max")) |
| new_size = device->bdev->bd_inode->i_size; |
| else { |
| if (sizestr[0] == '-') { |
| mod = -1; |
| sizestr++; |
| } else if (sizestr[0] == '+') { |
| mod = 1; |
| sizestr++; |
| } |
| new_size = memparse(sizestr, &retptr); |
| if (*retptr != '\0' || new_size == 0) { |
| ret = -EINVAL; |
| goto out_free; |
| } |
| } |
| |
| if (device->is_tgtdev_for_dev_replace) { |
| ret = -EPERM; |
| goto out_free; |
| } |
| |
| old_size = device->total_bytes; |
| |
| if (mod < 0) { |
| if (new_size > old_size) { |
| ret = -EINVAL; |
| goto out_free; |
| } |
| new_size = old_size - new_size; |
| } else if (mod > 0) { |
| if (new_size > ULLONG_MAX - old_size) { |
| ret = -ERANGE; |
| goto out_free; |
| } |
| new_size = old_size + new_size; |
| } |
| |
| if (new_size < 256 * 1024 * 1024) { |
| ret = -EINVAL; |
| goto out_free; |
| } |
| if (new_size > device->bdev->bd_inode->i_size) { |
| ret = -EFBIG; |
| goto out_free; |
| } |
| |
| do_div(new_size, root->sectorsize); |
| new_size *= root->sectorsize; |
| |
| printk_in_rcu(KERN_INFO "BTRFS: new size for %s is %llu\n", |
| rcu_str_deref(device->name), new_size); |
| |
| if (new_size > old_size) { |
| trans = btrfs_start_transaction(root, 0); |
| if (IS_ERR(trans)) { |
| ret = PTR_ERR(trans); |
| goto out_free; |
| } |
| ret = btrfs_grow_device(trans, device, new_size); |
| btrfs_commit_transaction(trans, root); |
| } else if (new_size < old_size) { |
| ret = btrfs_shrink_device(device, new_size); |
| } /* equal, nothing to do */ |
| |
| out_free: |
| kfree(vol_args); |
| out: |
| mutex_unlock(&root->fs_info->volume_mutex); |
| atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0); |
| mnt_drop_write_file(file); |
| return ret; |
| } |
| |
| static noinline int btrfs_ioctl_snap_create_transid(struct file *file, |
| char *name, unsigned long fd, int subvol, |
| u64 *transid, bool readonly, |
| struct btrfs_qgroup_inherit *inherit) |
| { |
| int namelen; |
| int ret = 0; |
| |
| ret = mnt_want_write_file(file); |
| if (ret) |
| goto out; |
| |
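| /* the new name must be a single path component: no '/', "." or ".." */ |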
| namelen = strlen(name); |
| if (strchr(name, '/')) { |
| ret = -EINVAL; |
| goto out_drop_write; |
| } |
| |
| if (name[0] == '.' && |
| (namelen == 1 || (name[1] == '.' && namelen == 2))) { |
| ret = -EEXIST; |
| goto out_drop_write; |
| } |
| |
| if (subvol) { |
| ret = btrfs_mksubvol(&file->f_path, name, namelen, |
| NULL, transid, readonly, inherit); |
| } else { |
| struct fd src = fdget(fd); |
| struct inode *src_inode; |
| if (!src.file) { |
| ret = -EINVAL; |
| goto out_drop_write; |
| } |
| |
| src_inode = file_inode(src.file); |
| if (src_inode->i_sb != file_inode(file)->i_sb) { |
| btrfs_info(BTRFS_I(src_inode)->root->fs_info, |
| "Snapshot src from another FS"); |
| ret = -EXDEV; |
| } else if (!inode_owner_or_capable(src_inode)) { |
| /* |
| * Subvolume creation is not restricted, but snapshots |
| * are limited to subvolumes the caller owns |
| */ |
| ret = -EPERM; |
| } else { |
| ret = btrfs_mksubvol(&file->f_path, name, namelen, |
| BTRFS_I(src_inode)->root, |
| transid, readonly, inherit); |
| } |
| fdput(src); |
| } |
| out_drop_write: |
| mnt_drop_write_file(file); |
| out: |
| return ret; |
| } |
| |
| static noinline int btrfs_ioctl_snap_create(struct file *file, |
| void __user *arg, int subvol) |
| { |
| struct btrfs_ioctl_vol_args *vol_args; |
| int ret; |
| |
| vol_args = memdup_user(arg, sizeof(*vol_args)); |
| if (IS_ERR(vol_args)) |
| return PTR_ERR(vol_args); |
| vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; |
| |
| ret = btrfs_ioctl_snap_create_transid(file, vol_args->name, |
| vol_args->fd, subvol, |
| NULL, false, NULL); |
| |
| kfree(vol_args); |
| return ret; |
| } |
| |
| static noinline int btrfs_ioctl_snap_create_v2(struct file *file, |
| void __user *arg, int subvol) |
| { |
| struct btrfs_ioctl_vol_args_v2 *vol_args; |
| int ret; |
| u64 transid = 0; |
| u64 *ptr = NULL; |
| bool readonly = false; |
| struct btrfs_qgroup_inherit *inherit = NULL; |
| |
| vol_args = memdup_user(arg, sizeof(*vol_args)); |
| if (IS_ERR(vol_args)) |
| return PTR_ERR(vol_args); |
| vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0'; |
| |
| if (vol_args->flags & |
| ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY | |
| BTRFS_SUBVOL_QGROUP_INHERIT)) { |
| ret = -EOPNOTSUPP; |
| goto out; |
| } |
| |
| if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC) |
| ptr = &transid; |
| if (vol_args->flags & BTRFS_SUBVOL_RDONLY) |
| readonly = true; |
| if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) { |
| if (vol_args->size > PAGE_CACHE_SIZE) { |
| ret = -EINVAL; |
| goto out; |
| } |
| inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size); |
| if (IS_ERR(inherit)) { |
| ret = PTR_ERR(inherit); |
| goto out; |
| } |
| } |
| |
| ret = btrfs_ioctl_snap_create_transid(file, vol_args->name, |
| vol_args->fd, subvol, ptr, |
| readonly, inherit); |
| |
| if (ret == 0 && ptr && |
| copy_to_user(arg + |
| offsetof(struct btrfs_ioctl_vol_args_v2, |
| transid), ptr, sizeof(*ptr))) |
| ret = -EFAULT; |
| out: |
| kfree(vol_args); |
| kfree(inherit); |
| return ret; |
| } |
| |
| static noinline int btrfs_ioctl_subvol_getflags(struct file *file, |
| void __user *arg) |
| { |
| struct inode *inode = file_inode(file); |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| int ret = 0; |
| u64 flags = 0; |
| |
| if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) |
| return -EINVAL; |
| |
| down_read(&root->fs_info->subvol_sem); |
| if (btrfs_root_readonly(root)) |
| flags |= BTRFS_SUBVOL_RDONLY; |
| up_read(&root->fs_info->subvol_sem); |
| |
| if (copy_to_user(arg, &flags, sizeof(flags))) |
| ret = -EFAULT; |
| |
| return ret; |
| } |
| |
| static noinline int btrfs_ioctl_subvol_setflags(struct file *file, |
| void __user *arg) |
| { |
| struct inode *inode = file_inode(file); |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| struct btrfs_trans_handle *trans; |
| u64 root_flags; |
| u64 flags; |
| int ret = 0; |
| |
| if (!inode_owner_or_capable(inode)) |
| return -EPERM; |
| |
| ret = mnt_want_write_file(file); |
| if (ret) |
| goto out; |
| |
| if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) { |
| ret = -EINVAL; |
| goto out_drop_write; |
| } |
| |
| if (copy_from_user(&flags, arg, sizeof(flags))) { |
| ret = -EFAULT; |
| goto out_drop_write; |
| } |
| |
| if (flags & BTRFS_SUBVOL_CREATE_ASYNC) { |
| ret = -EINVAL; |
| goto out_drop_write; |
| } |
| |
| if (flags & ~BTRFS_SUBVOL_RDONLY) { |
| ret = -EOPNOTSUPP; |
| goto out_drop_write; |
| } |
| |
| down_write(&root->fs_info->subvol_sem); |
| |
| /* nothing to do */ |
| if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root)) |
| goto out_drop_sem; |
| |
| root_flags = btrfs_root_flags(&root->root_item); |
| if (flags & BTRFS_SUBVOL_RDONLY) { |
| btrfs_set_root_flags(&root->root_item, |
| root_flags | BTRFS_ROOT_SUBVOL_RDONLY); |
| } else { |
| /* |
| * Block RO -> RW transition if this subvolume is involved in |
| * send |
| */ |
| spin_lock(&root->root_item_lock); |
| if (root->send_in_progress == 0) { |
| btrfs_set_root_flags(&root->root_item, |
| root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY); |
| spin_unlock(&root->root_item_lock); |
| } else { |
| spin_unlock(&root->root_item_lock); |
| btrfs_warn(root->fs_info, |
| "Attempt to set subvolume %llu read-write during send", |
| root->root_key.objectid); |
| ret = -EPERM; |
| goto out_drop_sem; |
| } |
| } |
| |
| trans = btrfs_start_transaction(root, 1); |
| if (IS_ERR(trans)) { |
| ret = PTR_ERR(trans); |
| goto out_reset; |
| } |
| |
| ret = btrfs_update_root(trans, root->fs_info->tree_root, |
| &root->root_key, &root->root_item); |
| |
| btrfs_commit_transaction(trans, root); |
| out_reset: |
| if (ret) |
| btrfs_set_root_flags(&root->root_item, root_flags); |
| out_drop_sem: |
| up_write(&root->fs_info->subvol_sem); |
| out_drop_write: |
| mnt_drop_write_file(file); |
| out: |
| return ret; |
| } |
| |
| /* |
| * helper to check if the subvolume references other subvolumes or |
| * is the default subvolume |
| */ |
| static noinline int may_destroy_subvol(struct btrfs_root *root) |
| { |
| struct btrfs_path *path; |
| struct btrfs_dir_item *di; |
| struct btrfs_key key; |
| u64 dir_id; |
| int ret; |
| |
| path = btrfs_alloc_path(); |
| if (!path) |
| return -ENOMEM; |
| |
| /* Make sure this root isn't set as the default subvol */ |
| dir_id = btrfs_super_root_dir(root->fs_info->super_copy); |
| di = btrfs_lookup_dir_item(NULL, root->fs_info->tree_root, path, |
| dir_id, "default", 7, 0); |
| if (di && !IS_ERR(di)) { |
| btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); |
| if (key.objectid == root->root_key.objectid) { |
| ret = -EPERM; |
| btrfs_err(root->fs_info, "deleting default subvolume " |
| "%llu is not allowed", key.objectid); |
| goto out; |
| } |
| btrfs_release_path(path); |
| } |
| |
| key.objectid = root->root_key.objectid; |
| key.type = BTRFS_ROOT_REF_KEY; |
| key.offset = (u64)-1; |
| |
| ret = btrfs_search_slot(NULL, root->fs_info->tree_root, |
| &key, path, 0, 0); |
| if (ret < 0) |
| goto out; |
| BUG_ON(ret == 0); |
| |
| ret = 0; |
| if (path->slots[0] > 0) { |
| path->slots[0]--; |
| btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); |
| if (key.objectid == root->root_key.objectid && |
| key.type == BTRFS_ROOT_REF_KEY) |
| ret = -ENOTEMPTY; |
| } |
| out: |
| btrfs_free_path(path); |
| return ret; |
| } |
| |
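| /* |
| * Return 1 if @key lies within the [min, max] range described by the |
| * ioctl search key, 0 otherwise. |
| */ |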
| static noinline int key_in_sk(struct btrfs_key *key, |
| struct btrfs_ioctl_search_key *sk) |
| { |
| struct btrfs_key test; |
| int ret; |
| |
| test.objectid = sk->min_objectid; |
| test.type = sk->min_type; |
| test.offset = sk->min_offset; |
| |
| ret = btrfs_comp_cpu_keys(key, &test); |
| if (ret < 0) |
| return 0; |
| |
| test.objectid = sk->max_objectid; |
| test.type = sk->max_type; |
| test.offset = sk->max_offset; |
| |
| ret = btrfs_comp_cpu_keys(key, &test); |
| if (ret > 0) |
| return 0; |
| return 1; |
| } |
| |
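| /* |
| * Copy the items from the current leaf that fall inside the search range |
| * into the user buffer: a btrfs_ioctl_search_header followed by the raw |
| * item data for each item. |
| */ |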
| static noinline int copy_to_sk(struct btrfs_root *root, |
| struct btrfs_path *path, |
| struct btrfs_key *key, |
| struct btrfs_ioctl_search_key *sk, |
| size_t *buf_size, |
| char __user *ubuf, |
| unsigned long *sk_offset, |
| int *num_found) |
| { |
| u64 found_transid; |
| struct extent_buffer *leaf; |
| struct btrfs_ioctl_search_header sh; |
| unsigned long item_off; |
| unsigned long item_len; |
| int nritems; |
| int i; |
| int slot; |
| int ret = 0; |
| |
| leaf = path->nodes[0]; |
| slot = path->slots[0]; |
| nritems = btrfs_header_nritems(leaf); |
| |
| if (btrfs_header_generation(leaf) > sk->max_transid) { |
| i = nritems; |
| goto advance_key; |
| } |
| found_transid = btrfs_header_generation(leaf); |
| |
| for (i = slot; i < nritems; i++) { |
| item_off = btrfs_item_ptr_offset(leaf, i); |
| item_len = btrfs_item_size_nr(leaf, i); |
| |
| btrfs_item_key_to_cpu(leaf, key, i); |
| if (!key_in_sk(key, sk)) |
| continue; |
| |
| if (sizeof(sh) + item_len > *buf_size) { |
| if (*num_found) { |
| ret = 1; |
| goto out; |
| } |
| |
| /* |
| * return one empty item back for v1, which does not |
| * handle -EOVERFLOW |
| */ |
| |
| *buf_size = sizeof(sh) + item_len; |
| item_len = 0; |
| ret = -EOVERFLOW; |
| } |
| |
| if (sizeof(sh) + item_len + *sk_offset > *buf_size) { |
| ret = 1; |
| goto out; |
| } |
| |
| sh.objectid = key->objectid; |
| sh.offset = key->offset; |
| sh.type = key->type; |
| sh.len = item_len; |
| sh.transid = found_transid; |
| |
| /* copy search result header */ |
| if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) { |
| ret = -EFAULT; |
| goto out; |
| } |
| |
| *sk_offset += sizeof(sh); |
| |
| if (item_len) { |
| char __user *up = ubuf + *sk_offset; |
| /* copy the item */ |
| if (read_extent_buffer_to_user(leaf, up, |
| item_off, item_len)) { |
| ret = -EFAULT; |
| goto out; |
| } |
| |
| *sk_offset += item_len; |
| } |
| (*num_found)++; |
| |
| if (ret) /* -EOVERFLOW from above */ |
| goto out; |
| |
| if (*num_found >= sk->nr_items) { |
| ret = 1; |
| goto out; |
| } |
| } |
| advance_key: |
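| /* |
| * Advance the key to the next possible item: bump the offset first, |
| * then the type, then the objectid, zeroing the lower members each |
| * time. If every member has already reached the search maximum, |
| * return 1 so the caller stops searching. |
| */ |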
| ret = 0; |
| if (key->offset < (u64)-1 && key->offset < sk->max_offset) |
| key->offset++; |
| else if (key->type < (u8)-1 && key->type < sk->max_type) { |
| key->offset = 0; |
| key->type++; |
| } else if (key->objectid < (u64)-1 && key->objectid < sk->max_objectid) { |
| key->offset = 0; |
| key->type = 0; |
| key->objectid++; |
| } else |
| ret = 1; |
| out: |
| /* |
| * 0: all items from this leaf copied, continue with next |
| * 1: * more items can be copied, but unused buffer is too small |
| * * all items were found |
| * Either way, this stops the loop which iterates to the next |
| * leaf |
| * -EOVERFLOW: item was too large for buffer |
| * -EFAULT: could not copy extent buffer back to userspace |
| */ |
| return ret; |
| } |
| |
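| /* |
| * Walk the requested tree with btrfs_search_forward() and copy every |
| * item in the search range into the user buffer via copy_to_sk(), until |
| * the buffer is full or enough items have been found. |
| */ |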
| static noinline int search_ioctl(struct inode *inode, |
| struct btrfs_ioctl_search_key *sk, |
| size_t *buf_size, |
| char __user *ubuf) |
| { |
| struct btrfs_root *root; |
| struct btrfs_key key; |
| struct btrfs_path *path; |
| struct btrfs_fs_info *info = BTRFS_I(inode)->root->fs_info; |
| int ret; |
| int num_found = 0; |
| unsigned long sk_offset = 0; |
| |
| if (*buf_size < sizeof(struct btrfs_ioctl_search_header)) { |
| *buf_size = sizeof(struct btrfs_ioctl_search_header); |
| return -EOVERFLOW; |
| } |
| |
| path = btrfs_alloc_path(); |
| if (!path) |
| return -ENOMEM; |
| |
| if (sk->tree_id == 0) { |
| /* search the root of the inode that was passed */ |
| root = BTRFS_I(inode)->root; |
| } else { |
| key.objectid = sk->tree_id; |
| key.type = BTRFS_ROOT_ITEM_KEY; |
| key.offset = (u64)-1; |
| root = btrfs_read_fs_root_no_name(info, &key); |
| if (IS_ERR(root)) { |
| printk(KERN_ERR "BTRFS: could not find root %llu\n", |
| sk->tree_id); |
| btrfs_free_path(path); |
| return -ENOENT; |
| } |
| } |
| |
| key.objectid = sk->min_objectid; |
| key.type = sk->min_type; |
| key.offset = sk->min_offset; |
| |
| path->keep_locks = 1; |
| |
| while (1) { |
| ret = btrfs_search_forward(root, &key, path, sk->min_transid); |
| if (ret != 0) { |
| if (ret > 0) |
| ret = 0; |
| goto err; |
| } |
| ret = copy_to_sk(root, path, &key, sk, buf_size, ubuf, |
| &sk_offset, &num_found); |
| btrfs_release_path(path); |
| if (ret) |
| break; |
| |
| } |
| if (ret > 0) |
| ret = 0; |
| err: |
| sk->nr_items = num_found; |
| btrfs_free_path(path); |
| return ret; |
| } |
| |
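/*
 * Handler for BTRFS_IOC_TREE_SEARCH: the v1 interface, which uses the
 * fixed-size result buffer embedded in the argument structure.
 */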
| static noinline int btrfs_ioctl_tree_search(struct file *file, |
| void __user *argp) |
| { |
| struct btrfs_ioctl_search_args __user *uargs; |
| struct btrfs_ioctl_search_key sk; |
| struct inode *inode; |
| int ret; |
| size_t buf_size; |
| |
| if (!capable(CAP_SYS_ADMIN)) |
| return -EPERM; |
| |
| uargs = (struct btrfs_ioctl_search_args __user *)argp; |
| |
| if (copy_from_user(&sk, &uargs->key, sizeof(sk))) |
| return -EFAULT; |
| |
| buf_size = sizeof(uargs->buf); |
| |
| inode = file_inode(file); |
| ret = search_ioctl(inode, &sk, &buf_size, uargs->buf); |
| |
| /* |
| * In the origin implementation an overflow is handled by returning a |
| * search header with a len of zero, so reset ret. |
| */ |
| if (ret == -EOVERFLOW) |
| ret = 0; |
| |
| if (ret == 0 && copy_to_user(&uargs->key, &sk, sizeof(sk))) |
| ret = -EFAULT; |
| return ret; |
| } |
| |
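/*
 * Handler for BTRFS_IOC_TREE_SEARCH_V2: like the v1 search, but the caller
 * supplies the result buffer size, which is capped at 16MB.
 */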
| static noinline int btrfs_ioctl_tree_search_v2(struct file *file, |
| void __user *argp) |
| { |
| struct btrfs_ioctl_search_args_v2 __user *uarg; |
| struct btrfs_ioctl_search_args_v2 args; |
| struct inode *inode; |
| int ret; |
| size_t buf_size; |
| const size_t buf_limit = 16 * 1024 * 1024; |
| |
| if (!capable(CAP_SYS_ADMIN)) |
| return -EPERM; |
| |
| /* copy search header and buffer size */ |
| uarg = (struct btrfs_ioctl_search_args_v2 __user *)argp; |
| if (copy_from_user(&args, uarg, sizeof(args))) |
| return -EFAULT; |
| |
| buf_size = args.buf_size; |
| |
| if (buf_size < sizeof(struct btrfs_ioctl_search_header)) |
| return -EOVERFLOW; |
| |
| /* limit result size to 16MB */ |
| if (buf_size > buf_limit) |
| buf_size = buf_limit; |
| |
| inode = file_inode(file); |
| ret = search_ioctl(inode, &args.key, &buf_size, |
			   (char __user *)(&uarg->buf[0]));
| if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key))) |
| ret = -EFAULT; |
| else if (ret == -EOVERFLOW && |
| copy_to_user(&uarg->buf_size, &buf_size, sizeof(buf_size))) |
| ret = -EFAULT; |
| |
| return ret; |
| } |
| |
| /* |
| * Search INODE_REFs to identify path name of 'dirid' directory |
| * in a 'tree_id' tree. and sets path name to 'name'. |
| */ |
| static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info, |
| u64 tree_id, u64 dirid, char *name) |
| { |
| struct btrfs_root *root; |
| struct btrfs_key key; |
| char *ptr; |
| int ret = -1; |
| int slot; |
| int len; |
| int total_len = 0; |
| struct btrfs_inode_ref *iref; |
| struct extent_buffer *l; |
| struct btrfs_path *path; |
| |
| if (dirid == BTRFS_FIRST_FREE_OBJECTID) { |
		name[0] = '\0';
| return 0; |
| } |
| |
| path = btrfs_alloc_path(); |
| if (!path) |
| return -ENOMEM; |
| |
| ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX]; |
| |
| key.objectid = tree_id; |
| key.type = BTRFS_ROOT_ITEM_KEY; |
| key.offset = (u64)-1; |
| root = btrfs_read_fs_root_no_name(info, &key); |
| if (IS_ERR(root)) { |
| printk(KERN_ERR "BTRFS: could not find root %llu\n", tree_id); |
| ret = -ENOENT; |
| goto out; |
| } |
| |
| key.objectid = dirid; |
| key.type = BTRFS_INODE_REF_KEY; |
| key.offset = (u64)-1; |
| |
| while (1) { |
| ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
| if (ret < 0) |
| goto out; |
| else if (ret > 0) { |
| ret = btrfs_previous_item(root, path, dirid, |
| BTRFS_INODE_REF_KEY); |
| if (ret < 0) |
| goto out; |
| else if (ret > 0) { |
| ret = -ENOENT; |
| goto out; |
| } |
| } |
| |
| l = path->nodes[0]; |
| slot = path->slots[0]; |
| btrfs_item_key_to_cpu(l, &key, slot); |
| |
| iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref); |
| len = btrfs_inode_ref_name_len(l, iref); |
| ptr -= len + 1; |
| total_len += len + 1; |
| if (ptr < name) { |
| ret = -ENAMETOOLONG; |
| goto out; |
| } |
| |
| *(ptr + len) = '/'; |
| read_extent_buffer(l, ptr, (unsigned long)(iref + 1), len); |
| |
| if (key.offset == BTRFS_FIRST_FREE_OBJECTID) |
| break; |
| |
| btrfs_release_path(path); |
| key.objectid = key.offset; |
| key.offset = (u64)-1; |
| dirid = key.objectid; |
| } |
| memmove(name, ptr, total_len); |
| name[total_len] = '\0'; |
| ret = 0; |
| out: |
| btrfs_free_path(path); |
| return ret; |
| } |
| |
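/*
 * Handler for BTRFS_IOC_INO_LOOKUP: resolve the path of the directory given
 * by objectid within the given tree (or the tree of the passed file when
 * treeid is zero).
 */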
| static noinline int btrfs_ioctl_ino_lookup(struct file *file, |
| void __user *argp) |
| { |
| struct btrfs_ioctl_ino_lookup_args *args; |
| struct inode *inode; |
| int ret; |
| |
| if (!capable(CAP_SYS_ADMIN)) |
| return -EPERM; |
| |
| args = memdup_user(argp, sizeof(*args)); |
| if (IS_ERR(args)) |
| return PTR_ERR(args); |
| |
| inode = file_inode(file); |
| |
| if (args->treeid == 0) |
| args->treeid = BTRFS_I(inode)->root->root_key.objectid; |
| |
| ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info, |
| args->treeid, args->objectid, |
| args->name); |
| |
| if (ret == 0 && copy_to_user(argp, args, sizeof(*args))) |
| ret = -EFAULT; |
| |
| kfree(args); |
| return ret; |
| } |
| |
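/*
 * Handler for BTRFS_IOC_SNAP_DESTROY: unlink a subvolume or snapshot from
 * its parent directory, drop its root refs to zero and record an orphan
 * item so the root itself is cleaned up later.
 */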
| static noinline int btrfs_ioctl_snap_destroy(struct file *file, |
| void __user *arg) |
| { |
| struct dentry *parent = file->f_path.dentry; |
| struct dentry *dentry; |
| struct inode *dir = parent->d_inode; |
| struct inode *inode; |
| struct btrfs_root *root = BTRFS_I(dir)->root; |
| struct btrfs_root *dest = NULL; |
| struct btrfs_ioctl_vol_args *vol_args; |
| struct btrfs_trans_handle *trans; |
| struct btrfs_block_rsv block_rsv; |
| u64 root_flags; |
| u64 qgroup_reserved; |
| int namelen; |
| int ret; |
| int err = 0; |
| |
| vol_args = memdup_user(arg, sizeof(*vol_args)); |
| if (IS_ERR(vol_args)) |
| return PTR_ERR(vol_args); |
| |
| vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; |
| namelen = strlen(vol_args->name); |
| if (strchr(vol_args->name, '/') || |
| strncmp(vol_args->name, "..", namelen) == 0) { |
| err = -EINVAL; |
| goto out; |
| } |
| |
| err = mnt_want_write_file(file); |
| if (err) |
| goto out; |
| err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT); |
| if (err == -EINTR) |
| goto out_drop_write; |
| dentry = lookup_one_len(vol_args->name, parent, namelen); |
| if (IS_ERR(dentry)) { |
| err = PTR_ERR(dentry); |
| goto out_unlock_dir; |
| } |
| |
| if (!dentry->d_inode) { |
| err = -ENOENT; |
| goto out_dput; |
| } |
| |
| inode = dentry->d_inode; |
| dest = BTRFS_I(inode)->root; |
| if (!capable(CAP_SYS_ADMIN)) { |
| /* |
| * Regular user. Only allow this with a special mount |
| * option, when the user has write+exec access to the |
| * subvol root, and when rmdir(2) would have been |
| * allowed. |
| * |
		 * Note that this is _not_ a check that the subvol is
		 * empty or that it doesn't contain data we would not
		 * otherwise be able to delete.
| * |
| * Users who want to delete empty subvols should try |
| * rmdir(2). |
| */ |
| err = -EPERM; |
| if (!btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED)) |
| goto out_dput; |
| |
| /* |
| * Do not allow deletion if the parent dir is the same |
| * as the dir to be deleted. That means the ioctl |
| * must be called on the dentry referencing the root |
| * of the subvol, not a random directory contained |
| * within it. |
| */ |
| err = -EINVAL; |
| if (root == dest) |
| goto out_dput; |
| |
| err = inode_permission(inode, MAY_WRITE | MAY_EXEC); |
| if (err) |
| goto out_dput; |
| } |
| |
| /* check if subvolume may be deleted by a user */ |
| err = btrfs_may_delete(dir, dentry, 1); |
| if (err) |
| goto out_dput; |
| |
| if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) { |
| err = -EINVAL; |
| goto out_dput; |
| } |
| |
| mutex_lock(&inode->i_mutex); |
| |
| /* |
| * Don't allow to delete a subvolume with send in progress. This is |
| * inside the i_mutex so the error handling that has to drop the bit |
| * again is not run concurrently. |
| */ |
| spin_lock(&dest->root_item_lock); |
| root_flags = btrfs_root_flags(&dest->root_item); |
| if (dest->send_in_progress == 0) { |
| btrfs_set_root_flags(&dest->root_item, |
| root_flags | BTRFS_ROOT_SUBVOL_DEAD); |
| spin_unlock(&dest->root_item_lock); |
| } else { |
| spin_unlock(&dest->root_item_lock); |
| btrfs_warn(root->fs_info, |
| "Attempt to delete subvolume %llu during send", |
| dest->root_key.objectid); |
| err = -EPERM; |
| goto out_dput; |
| } |
| |
| err = d_invalidate(dentry); |
| if (err) |
| goto out_unlock; |
| |
| down_write(&root->fs_info->subvol_sem); |
| |
| err = may_destroy_subvol(dest); |
| if (err) |
| goto out_up_write; |
| |
| btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP); |
| /* |
| * One for dir inode, two for dir entries, two for root |
| * ref/backref. |
| */ |
| err = btrfs_subvolume_reserve_metadata(root, &block_rsv, |
| 5, &qgroup_reserved, true); |
| if (err) |
| goto out_up_write; |
| |
| trans = btrfs_start_transaction(root, 0); |
| if (IS_ERR(trans)) { |
| err = PTR_ERR(trans); |
| goto out_release; |
| } |
| trans->block_rsv = &block_rsv; |
| trans->bytes_reserved = block_rsv.size; |
| |
| ret = btrfs_unlink_subvol(trans, root, dir, |
| dest->root_key.objectid, |
| dentry->d_name.name, |
| dentry->d_name.len); |
| if (ret) { |
| err = ret; |
| btrfs_abort_transaction(trans, root, ret); |
| goto out_end_trans; |
| } |
| |
| btrfs_record_root_in_trans(trans, dest); |
| |
| memset(&dest->root_item.drop_progress, 0, |
| sizeof(dest->root_item.drop_progress)); |
| dest->root_item.drop_level = 0; |
| btrfs_set_root_refs(&dest->root_item, 0); |
| |
| if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) { |
| ret = btrfs_insert_orphan_item(trans, |
| root->fs_info->tree_root, |
| dest->root_key.objectid); |
| if (ret) { |
| btrfs_abort_transaction(trans, root, ret); |
| err = ret; |
| goto out_end_trans; |
| } |
| } |
| |
| ret = btrfs_uuid_tree_rem(trans, root->fs_info->uuid_root, |
| dest->root_item.uuid, BTRFS_UUID_KEY_SUBVOL, |
| dest->root_key.objectid); |
| if (ret && ret != -ENOENT) { |
| btrfs_abort_transaction(trans, root, ret); |
| err = ret; |
| goto out_end_trans; |
| } |
| if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) { |
| ret = btrfs_uuid_tree_rem(trans, root->fs_info->uuid_root, |
| dest->root_item.received_uuid, |
| BTRFS_UUID_KEY_RECEIVED_SUBVOL, |
| dest->root_key.objectid); |
| if (ret && ret != -ENOENT) { |
| btrfs_abort_transaction(trans, root, ret); |
| err = ret; |
| goto out_end_trans; |
| } |
| } |
| |
| out_end_trans: |
| trans->block_rsv = NULL; |
| trans->bytes_reserved = 0; |
| ret = btrfs_end_transaction(trans, root); |
| if (ret && !err) |
| err = ret; |
| inode->i_flags |= S_DEAD; |
| out_release: |
| btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved); |
| out_up_write: |
| up_write(&root->fs_info->subvol_sem); |
| out_unlock: |
| if (err) { |
| spin_lock(&dest->root_item_lock); |
| root_flags = btrfs_root_flags(&dest->root_item); |
| btrfs_set_root_flags(&dest->root_item, |
| root_flags & ~BTRFS_ROOT_SUBVOL_DEAD); |
| spin_unlock(&dest->root_item_lock); |
| } |
| mutex_unlock(&inode->i_mutex); |
| if (!err) { |
| shrink_dcache_sb(root->fs_info->sb); |
| btrfs_invalidate_inodes(dest); |
| d_delete(dentry); |
| ASSERT(dest->send_in_progress == 0); |
| |
| /* the last ref */ |
| if (dest->cache_inode) { |
| iput(dest->cache_inode); |
| dest->cache_inode = NULL; |
| } |
| } |
| out_dput: |
| dput(dentry); |
| out_unlock_dir: |
| mutex_unlock(&dir->i_mutex); |
| out_drop_write: |
| mnt_drop_write_file(file); |
| out: |
| kfree(vol_args); |
| return err; |
| } |
| |
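/*
 * Defragment the tree roots (when called on a directory, which requires
 * CAP_SYS_ADMIN) or the data of a regular file, optionally restricted to
 * the range described by the argument.
 */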
| static int btrfs_ioctl_defrag(struct file *file, void __user *argp) |
| { |
| struct inode *inode = file_inode(file); |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| struct btrfs_ioctl_defrag_range_args *range; |
| int ret; |
| |
| ret = mnt_want_write_file(file); |
| if (ret) |
| return ret; |
| |
| if (btrfs_root_readonly(root)) { |
| ret = -EROFS; |
| goto out; |
| } |
| |
| switch (inode->i_mode & S_IFMT) { |
| case S_IFDIR: |
| if (!capable(CAP_SYS_ADMIN)) { |
| ret = -EPERM; |
| goto out; |
| } |
| ret = btrfs_defrag_root(root); |
| if (ret) |
| goto out; |
| ret = btrfs_defrag_root(root->fs_info->extent_root); |
| break; |
| case S_IFREG: |
| if (!(file->f_mode & FMODE_WRITE)) { |
| ret = -EINVAL; |
| goto out; |
| } |
| |
| range = kzalloc(sizeof(*range), GFP_KERNEL); |
| if (!range) { |
| ret = -ENOMEM; |
| goto out; |
| } |
| |
| if (argp) { |
| if (copy_from_user(range, argp, |
| sizeof(*range))) { |
| ret = -EFAULT; |
| kfree(range); |
| goto out; |
| } |
| /* compression requires us to start the IO */ |
| if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) { |
| range->flags |= BTRFS_DEFRAG_RANGE_START_IO; |
| range->extent_thresh = (u32)-1; |
| } |
| } else { |
| /* the rest are all set to zero by kzalloc */ |
| range->len = (u64)-1; |
| } |
| ret = btrfs_defrag_file(file_inode(file), file, |
| range, 0, 0); |
| if (ret > 0) |
| ret = 0; |
| kfree(range); |
| break; |
| default: |
| ret = -EINVAL; |
| } |
| out: |
| mnt_drop_write_file(file); |
| return ret; |
| } |
| |
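/*
 * Handler for BTRFS_IOC_ADD_DEV: add a new device to the filesystem. Only
 * one exclusive device operation is allowed at a time.
 */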
| static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg) |
| { |
| struct btrfs_ioctl_vol_args *vol_args; |
| int ret; |
| |
| if (!capable(CAP_SYS_ADMIN)) |
| return -EPERM; |
| |
| if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running, |
| 1)) { |
| return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; |
| } |
| |
| mutex_lock(&root->fs_info->volume_mutex); |
| vol_args = memdup_user(arg, sizeof(*vol_args)); |
| if (IS_ERR(vol_args)) { |
| ret = PTR_ERR(vol_args); |
| goto out; |
| } |
| |
| vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; |
| ret = btrfs_init_new_device(root, vol_args->name); |
| |
| kfree(vol_args); |
| out: |
| mutex_unlock(&root->fs_info->volume_mutex); |
| atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0); |
| return ret; |
| } |
| |
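/*
 * Handler for BTRFS_IOC_RM_DEV: remove a device from the filesystem. Only
 * one exclusive device operation is allowed at a time.
 */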
| static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg) |
| { |
| struct btrfs_root *root = BTRFS_I(file_inode(file))->root; |
| struct btrfs_ioctl_vol_args *vol_args; |
| int ret; |
| |
| if (!capable(CAP_SYS_ADMIN)) |
| return -EPERM; |
| |
| ret = mnt_want_write_file(file); |
| if (ret) |
| return ret; |
| |
| vol_args = memdup_user(arg, sizeof(*vol_args)); |
| if (IS_ERR(vol_args)) { |
| ret = PTR_ERR(vol_args); |
| goto out; |
| } |
| |
| vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; |
| |
| if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running, |
| 1)) { |
| ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; |
| goto out; |
| } |
| |
| mutex_lock(&root->fs_info->volume_mutex); |
| ret = btrfs_rm_device(root, vol_args->name); |
| mutex_unlock(&root->fs_info->volume_mutex); |
| atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0); |
| |
| out: |
| kfree(vol_args); |
| mnt_drop_write_file(file); |
| return ret; |
| } |
| |
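/*
 * Handler for BTRFS_IOC_FS_INFO: report the fsid, the number of devices,
 * the highest device id and the node/sector sizes to userspace.
 */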
| static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg) |
| { |
| struct btrfs_ioctl_fs_info_args *fi_args; |
| struct btrfs_device *device; |
| struct btrfs_device *next; |
| struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; |
| int ret = 0; |
| |
| fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL); |
| if (!fi_args) |
| return -ENOMEM; |
| |
| mutex_lock(&fs_devices->device_list_mutex); |
| fi_args->num_devices = fs_devices->num_devices; |
| memcpy(&fi_args->fsid, root->fs_info->fsid, sizeof(fi_args->fsid)); |
| |
| list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { |
| if (device->devid > fi_args->max_id) |
| fi_args->max_id = device->devid; |
| } |
| mutex_unlock(&fs_devices->device_list_mutex); |
| |
| fi_args->nodesize = root->fs_info->super_copy->nodesize; |
| fi_args->sectorsize = root->fs_info->super_copy->sectorsize; |
| fi_args->clone_alignment = root->fs_info->super_copy->sectorsize; |
| |
| if (copy_to_user(arg, fi_args, sizeof(*fi_args))) |
| ret = -EFAULT; |
| |
| kfree(fi_args); |
| return ret; |
| } |
| |
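/*
 * Handler for BTRFS_IOC_DEV_INFO: look up a device by devid (and optionally
 * by uuid) and report its size, bytes used and path to userspace.
 */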
| static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg) |
| { |
| struct btrfs_ioctl_dev_info_args *di_args; |
| struct btrfs_device *dev; |
| struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; |
| int ret = 0; |
| char *s_uuid = NULL; |
| |
| di_args = memdup_user(arg, sizeof(*di_args)); |
| if (IS_ERR(di_args)) |
| return PTR_ERR(di_args); |
| |
| if (!btrfs_is_empty_uuid(di_args->uuid)) |
| s_uuid = di_args->uuid; |
| |
| mutex_lock(&fs_devices->device_list_mutex); |
| dev = btrfs_find_device(root->fs_info, di_args->devid, s_uuid, NULL); |
| |
| if (!dev) { |
| ret = -ENODEV; |
| goto out; |
| } |
| |
| di_args->devid = dev->devid; |
| di_args->bytes_used = dev->bytes_used; |
| di_args->total_bytes = dev->total_bytes; |
| memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid)); |
| if (dev->name) { |
| struct rcu_string *name; |
| |
| rcu_read_lock(); |
| name = rcu_dereference(dev->name); |
| strncpy(di_args->path, name->str, sizeof(di_args->path)); |
| rcu_read_unlock(); |
| di_args->path[sizeof(di_args->path) - 1] = 0; |
| } else { |
| di_args->path[0] = '\0'; |
| } |
| |
| out: |
| mutex_unlock(&fs_devices->device_list_mutex); |
| if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args))) |
| ret = -EFAULT; |
| |
| kfree(di_args); |
| return ret; |
| } |
| |
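/*
 * Return the page cache page at file offset 'off', reading it in if it is
 * not uptodate. The page is returned unlocked with a reference held, or
 * NULL on failure.
 */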
| static struct page *extent_same_get_page(struct inode *inode, u64 off) |
| { |
| struct page *page; |
| pgoff_t index; |
| struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; |
| |
| index = off >> PAGE_CACHE_SHIFT; |
| |
| page = grab_cache_page(inode->i_mapping, index); |
| if (!page) |
| return NULL; |
| |
| if (!PageUptodate(page)) { |
| if (extent_read_full_page_nolock(tree, page, btrfs_get_extent, |
| 0)) |
| return NULL; |
| lock_page(page); |
| if (!PageUptodate(page)) { |
| unlock_page(page); |
| page_cache_release(page); |
| return NULL; |
| } |
| } |
| unlock_page(page); |
| |
| return page; |
| } |
| |
| static inline void lock_extent_range(struct inode *inode, u64 off, u64 len) |
| { |
	/*
	 * Do any pending delalloc/csum calculations on src, one way or
	 * another, and lock the file content.
	 */
| while (1) { |
| struct btrfs_ordered_extent *ordered; |
| lock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1); |
| ordered = btrfs_lookup_first_ordered_extent(inode, |
| off + len - 1); |
| if ((!ordered || |
| ordered->file_offset + ordered->len <= off || |
| ordered->file_offset >= off + len) && |
| !test_range_bit(&BTRFS_I(inode)->io_tree, off, |
| off + len - 1, EXTENT_DELALLOC, 0, NULL)) { |
| if (ordered) |
| btrfs_put_ordered_extent(ordered); |
| break; |
| } |
| unlock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1); |
| if (ordered) |
| btrfs_put_ordered_extent(ordered); |
| btrfs_wait_ordered_range(inode, off, len); |
| } |
| } |
| |
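/* Undo the extent range and inode locks taken by btrfs_double_lock() */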
| static void btrfs_double_unlock(struct inode *inode1, u64 loff1, |
| struct inode *inode2, u64 loff2, u64 len) |
| { |
| unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1); |
| unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1); |
| |
| mutex_unlock(&inode1->i_mutex); |
| mutex_unlock(&inode2->i_mutex); |
| } |
| |
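/*
 * Lock the i_mutex and the extent range of both inodes for the extent-same
 * ioctl, always in the same order to avoid deadlocks.
 */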
| static void btrfs_double_lock(struct inode *inode1, u64 loff1, |
| struct inode *inode2, u64 loff2, u64 len) |
| { |
| if (inode1 < inode2) { |
| swap(inode1, inode2); |
| swap(loff1, loff2); |
| } |
| |
| mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT); |
| lock_extent_range(inode1, loff1, len); |
| if (inode1 != inode2) { |
| mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD); |
| lock_extent_range(inode2, loff2, len); |
| } |
| } |
| |
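/*
 * Compare 'len' bytes of the two file ranges page by page. Returns 0 if
 * they are identical, BTRFS_SAME_DATA_DIFFERS if they differ, or a
 * negative error code.
 */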
| static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst, |
| u64 dst_loff, u64 len) |
| { |
| int ret = 0; |
| struct page *src_page, *dst_page; |
| unsigned int cmp_len = PAGE_CACHE_SIZE; |
| void *addr, *dst_addr; |
| |
| while (len) { |
| if (len < PAGE_CACHE_SIZE) |
| cmp_len = len; |
| |
| src_page = extent_same_get_page(src, loff); |
| if (!src_page) |
| return -EINVAL; |
| dst_page = extent_same_get_page(dst, dst_loff); |
| if (!dst_page) { |
| page_cache_release(src_page); |
| return -EINVAL; |
| } |
| addr = kmap_atomic(src_page); |
| dst_addr = kmap_atomic(dst_page); |
| |
| flush_dcache_page(src_page); |
| flush_dcache_page(dst_page); |
| |
| if (memcmp(addr, dst_addr, cmp_len)) |
| ret = BTRFS_SAME_DATA_DIFFERS; |
| |
| kunmap_atomic(addr); |
| kunmap_atomic(dst_addr); |
| page_cache_release(src_page); |
| page_cache_release(dst_page); |
| |
| if (ret) |
| break; |
| |
| loff += cmp_len; |
| dst_loff += cmp_len; |
| len -= cmp_len; |
| } |
| |
| return ret; |
| } |
| |
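/*
 * Make sure the dedupe range lies within i_size and is block aligned, as
 * btrfs_clone() requires.
 */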
| static int extent_same_check_offsets(struct inode *inode, u64 off, u64 len) |
| { |
| u64 bs = BTRFS_I(inode)->root->fs_info->sb->s_blocksize; |
| |
| if (off + len > inode->i_size || off + len < off) |
| return -EINVAL; |
| /* Check that we are block aligned - btrfs_clone() requires this */ |
| if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs)) |
| return -EINVAL; |
| |
| return 0; |
| } |
| |
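/*
 * Deduplicate a single range: lock both files, check the offsets and the
 * NODATASUM flags, verify that the data is identical and then share the
 * extents with btrfs_clone().
 */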
| static int btrfs_extent_same(struct inode *src, u64 loff, u64 len, |
| struct inode *dst, u64 dst_loff) |
| { |
| int ret; |
| |
| /* |
| * btrfs_clone() can't handle extents in the same file |
| * yet. Once that works, we can drop this check and replace it |
| * with a check for the same inode, but overlapping extents. |
| */ |
| if (src == dst) |
| return -EINVAL; |
| |
| btrfs_double_lock(src, loff, dst, dst_loff, len); |
| |
| ret = extent_same_check_offsets(src, loff, len); |
| if (ret) |
| goto out_unlock; |
| |
| ret = extent_same_check_offsets(dst, dst_loff, len); |
| if (ret) |
| goto out_unlock; |
| |
| /* don't make the dst file partly checksummed */ |
| if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) != |
| (BTRFS_I(dst)->flags & BTRFS_INODE_NODATASUM)) { |
| ret = -EINVAL; |
| goto out_unlock; |
| } |
| |
| ret = btrfs_cmp_data(src, loff, dst, dst_loff, len); |
| if (ret == 0) |
| ret = btrfs_clone(src, dst, loff, len, len, dst_loff); |
| |
| out_unlock: |
| btrfs_double_unlock(src, loff, dst, dst_loff, len); |
| |
| return ret; |
| } |
| |
| #define BTRFS_MAX_DEDUPE_LEN (16 * 1024 * 1024) |
| |
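/*
 * Handler for BTRFS_IOC_FILE_EXTENT_SAME: deduplicate the given source
 * range against each of the destination file ranges passed in by the user.
 */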
| static long btrfs_ioctl_file_extent_same(struct file *file, |
| struct btrfs_ioctl_same_args __user *argp) |
| { |
| struct btrfs_ioctl_same_args *same; |
| struct btrfs_ioctl_same_extent_info *info; |
| struct inode *src = file_inode(file); |
| u64 off; |
| u64 len; |
| int i; |
| int ret; |
| unsigned long size; |
| u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize; |
| bool is_admin = capable(CAP_SYS_ADMIN); |
| u16 count; |
| |
| if (!(file->f_mode & FMODE_READ)) |
| return -EINVAL; |
| |
| ret = mnt_want_write_file(file); |
| if (ret) |
| return ret; |
| |
| if (get_user(count, &argp->dest_count)) { |
| ret = -EFAULT; |
| goto out; |
| } |
| |
| size = offsetof(struct btrfs_ioctl_same_args __user, info[count]); |
| |
| same = memdup_user(argp, size); |
| |
| if (IS_ERR(same)) { |
| ret = PTR_ERR(same); |
| goto out; |
| } |
| |
| off = same->logical_offset; |
| len = same->length; |
| |
| /* |
| * Limit the total length we will dedupe for each operation. |
| * This is intended to bound the total time spent in this |
| * ioctl to something sane. |
| */ |
| if (len > BTRFS_MAX_DEDUPE_LEN) |
| len = BTRFS_MAX_DEDUPE_LEN; |
| |
| if (WARN_ON_ONCE(bs < PAGE_CACHE_SIZE)) { |
| /* |
| * Btrfs does not support blocksize < page_size. As a |
| * result, btrfs_cmp_data() won't correctly handle |
| * this situation without an update. |
| */ |
| ret = -EINVAL; |
| goto out; |
| } |
| |
| ret = -EISDIR; |
| if (S_ISDIR(src->i_mode)) |
| goto out; |
| |
| ret = -EACCES; |
| if (!S_ISREG(src->i_mode)) |
| goto out; |
| |
| /* pre-format output fields to sane values */ |
| for (i = 0; i < count; i++) { |
| same->info[i].bytes_deduped = 0ULL; |
| same->info[i].status = 0 |