| /* |
| * Copyright (C) 2007 Oracle. All rights reserved. |
| * |
| * This program is free software; you can redistribute it and/or |
| * modify it under the terms of the GNU General Public |
| * License v2 as published by the Free Software Foundation. |
| * |
| * This program is distributed in the hope that it will be useful, |
| * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| * General Public License for more details. |
| * |
| * You should have received a copy of the GNU General Public |
| * License along with this program; if not, write to the |
| * Free Software Foundation, Inc., 59 Temple Place - Suite 330, |
| * Boston, MA 021110-1307, USA. |
| */ |
| |
| #include <linux/kernel.h> |
| #include <linux/bio.h> |
| #include <linux/buffer_head.h> |
| #include <linux/file.h> |
| #include <linux/fs.h> |
| #include <linux/pagemap.h> |
| #include <linux/highmem.h> |
| #include <linux/time.h> |
| #include <linux/init.h> |
| #include <linux/string.h> |
| #include <linux/backing-dev.h> |
| #include <linux/mpage.h> |
| #include <linux/swap.h> |
| #include <linux/writeback.h> |
| #include <linux/compat.h> |
| #include <linux/bit_spinlock.h> |
| #include <linux/xattr.h> |
| #include <linux/posix_acl.h> |
| #include <linux/falloc.h> |
| #include <linux/slab.h> |
| #include <linux/ratelimit.h> |
| #include <linux/mount.h> |
| #include <linux/btrfs.h> |
| #include <linux/blkdev.h> |
| #include <linux/posix_acl_xattr.h> |
| #include <linux/uio.h> |
| #include "ctree.h" |
| #include "disk-io.h" |
| #include "transaction.h" |
| #include "btrfs_inode.h" |
| #include "print-tree.h" |
| #include "ordered-data.h" |
| #include "xattr.h" |
| #include "tree-log.h" |
| #include "volumes.h" |
| #include "compression.h" |
| #include "locking.h" |
| #include "free-space-cache.h" |
| #include "inode-map.h" |
| #include "backref.h" |
| #include "hash.h" |
| #include "props.h" |
| #include "qgroup.h" |
| #include "dedupe.h" |
| |
/*
 * Arguments passed through iget5_locked() to the inode lookup
 * test/set callbacks when resolving a btrfs inode.
 */
struct btrfs_iget_args {
	struct btrfs_key *location;	/* key of the inode item to look up */
	struct btrfs_root *root;	/* root the inode belongs to */
};
| |
/* Per-call bookkeeping carried across a direct I/O submission. */
struct btrfs_dio_data {
	u64 outstanding_extents;
	u64 reserve;
	/*
	 * Range covered by ordered extents that have been created but whose
	 * bios have not yet been submitted.
	 */
	u64 unsubmitted_oe_range_start;
	u64 unsubmitted_oe_range_end;
	int overwrite;	/* non-zero when the DIO write overwrites existing extents */
};
| |
| static const struct inode_operations btrfs_dir_inode_operations; |
| static const struct inode_operations btrfs_symlink_inode_operations; |
| static const struct inode_operations btrfs_dir_ro_inode_operations; |
| static const struct inode_operations btrfs_special_inode_operations; |
| static const struct inode_operations btrfs_file_inode_operations; |
| static const struct address_space_operations btrfs_aops; |
| static const struct address_space_operations btrfs_symlink_aops; |
| static const struct file_operations btrfs_dir_file_operations; |
| static const struct extent_io_ops btrfs_extent_io_ops; |
| |
| static struct kmem_cache *btrfs_inode_cachep; |
| struct kmem_cache *btrfs_trans_handle_cachep; |
| struct kmem_cache *btrfs_path_cachep; |
| struct kmem_cache *btrfs_free_space_cachep; |
| |
/*
 * Map the S_IFMT bits of an inode mode (shifted down by S_SHIFT) to the
 * btrfs directory-item file type stored on disk.
 */
#define S_SHIFT 12
static const unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
};
| |
| static int btrfs_setsize(struct inode *inode, struct iattr *attr); |
| static int btrfs_truncate(struct inode *inode); |
| static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent); |
| static noinline int cow_file_range(struct inode *inode, |
| struct page *locked_page, |
| u64 start, u64 end, u64 delalloc_end, |
| int *page_started, unsigned long *nr_written, |
| int unlock, struct btrfs_dedupe_hash *hash); |
| static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len, |
| u64 orig_start, u64 block_start, |
| u64 block_len, u64 orig_block_len, |
| u64 ram_bytes, int compress_type, |
| int type); |
| |
| static void __endio_write_update_ordered(struct inode *inode, |
| const u64 offset, const u64 bytes, |
| const bool uptodate); |
| |
| /* |
| * Cleanup all submitted ordered extents in specified range to handle errors |
| * from the fill_dellaloc() callback. |
| * |
| * NOTE: caller must ensure that when an error happens, it can not call |
| * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING |
| * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata |
| * to be released, which we want to happen only when finishing the ordered |
| * extent (btrfs_finish_ordered_io()). Also note that the caller of the |
| * fill_delalloc() callback already does proper cleanup for the first page of |
| * the range, that is, it invokes the callback writepage_end_io_hook() for the |
| * range of the first page. |
| */ |
| static inline void btrfs_cleanup_ordered_extents(struct inode *inode, |
| const u64 offset, |
| const u64 bytes) |
| { |
| unsigned long index = offset >> PAGE_SHIFT; |
| unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT; |
| struct page *page; |
| |
| while (index <= end_index) { |
| page = find_get_page(inode->i_mapping, index); |
| index++; |
| if (!page) |
| continue; |
| ClearPagePrivate2(page); |
| put_page(page); |
| } |
| return __endio_write_update_ordered(inode, offset + PAGE_SIZE, |
| bytes - PAGE_SIZE, false); |
| } |
| |
| static int btrfs_dirty_inode(struct inode *inode); |
| |
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/*
 * Sanity-test-only hook: point a test inode's io_tree at the real
 * extent_io operations so the self tests exercise the normal callbacks.
 */
void btrfs_test_inode_set_ops(struct inode *inode)
{
	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
#endif
| |
/*
 * Initialize the security context of a freshly created inode: first the
 * POSIX ACLs inherited from @dir, then the security xattrs.  Returns 0 on
 * success or the first error encountered.
 */
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode, struct inode *dir,
				     const struct qstr *qstr)
{
	int ret;

	ret = btrfs_init_acl(trans, inode, dir);
	if (ret)
		return ret;

	return btrfs_xattr_security_init(trans, inode, dir, qstr);
}
| |
| /* |
| * this does all the hard work for inserting an inline extent into |
| * the btree. The caller should have done a btrfs_drop_extents so that |
| * no overlapping inline items exist in the btree |
| */ |
| static int insert_inline_extent(struct btrfs_trans_handle *trans, |
| struct btrfs_path *path, int extent_inserted, |
| struct btrfs_root *root, struct inode *inode, |
| u64 start, size_t size, size_t compressed_size, |
| int compress_type, |
| struct page **compressed_pages) |
| { |
| struct extent_buffer *leaf; |
| struct page *page = NULL; |
| char *kaddr; |
| unsigned long ptr; |
| struct btrfs_file_extent_item *ei; |
| int ret; |
| size_t cur_size = size; |
| unsigned long offset; |
| |
| if (compressed_size && compressed_pages) |
| cur_size = compressed_size; |
| |
| inode_add_bytes(inode, size); |
| |
| if (!extent_inserted) { |
| struct btrfs_key key; |
| size_t datasize; |
| |
| key.objectid = btrfs_ino(BTRFS_I(inode)); |
| key.offset = start; |
| key.type = BTRFS_EXTENT_DATA_KEY; |
| |
| datasize = btrfs_file_extent_calc_inline_size(cur_size); |
| path->leave_spinning = 1; |
| ret = btrfs_insert_empty_item(trans, root, path, &key, |
| datasize); |
| if (ret) |
| goto fail; |
| } |
| leaf = path->nodes[0]; |
| ei = btrfs_item_ptr(leaf, path->slots[0], |
| struct btrfs_file_extent_item); |
| btrfs_set_file_extent_generation(leaf, ei, trans->transid); |
| btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE); |
| btrfs_set_file_extent_encryption(leaf, ei, 0); |
| btrfs_set_file_extent_other_encoding(leaf, ei, 0); |
| btrfs_set_file_extent_ram_bytes(leaf, ei, size); |
| ptr = btrfs_file_extent_inline_start(ei); |
| |
| if (compress_type != BTRFS_COMPRESS_NONE) { |
| struct page *cpage; |
| int i = 0; |
| while (compressed_size > 0) { |
| cpage = compressed_pages[i]; |
| cur_size = min_t(unsigned long, compressed_size, |
| PAGE_SIZE); |
| |
| kaddr = kmap_atomic(cpage); |
| write_extent_buffer(leaf, kaddr, ptr, cur_size); |
| kunmap_atomic(kaddr); |
| |
| i++; |
| ptr += cur_size; |
| compressed_size -= cur_size; |
| } |
| btrfs_set_file_extent_compression(leaf, ei, |
| compress_type); |
| } else { |
| page = find_get_page(inode->i_mapping, |
| start >> PAGE_SHIFT); |
| btrfs_set_file_extent_compression(leaf, ei, 0); |
| kaddr = kmap_atomic(page); |
| offset = start & (PAGE_SIZE - 1); |
| write_extent_buffer(leaf, kaddr + offset, ptr, size); |
| kunmap_atomic(kaddr); |
| put_page(page); |
| } |
| btrfs_mark_buffer_dirty(leaf); |
| btrfs_release_path(path); |
| |
| /* |
| * we're an inline extent, so nobody can |
| * extend the file past i_size without locking |
| * a page we already have locked. |
| * |
| * We must do any isize and inode updates |
| * before we unlock the pages. Otherwise we |
| * could end up racing with unlink. |
| */ |
| BTRFS_I(inode)->disk_i_size = inode->i_size; |
| ret = btrfs_update_inode(trans, root, inode); |
| |
| fail: |
| return ret; |
| } |
| |
| |
| /* |
| * conditionally insert an inline extent into the file. This |
| * does the checks required to make sure the data is small enough |
| * to fit as an inline extent. |
| */ |
| static noinline int cow_file_range_inline(struct btrfs_root *root, |
| struct inode *inode, u64 start, |
| u64 end, size_t compressed_size, |
| int compress_type, |
| struct page **compressed_pages) |
| { |
| struct btrfs_fs_info *fs_info = root->fs_info; |
| struct btrfs_trans_handle *trans; |
| u64 isize = i_size_read(inode); |
| u64 actual_end = min(end + 1, isize); |
| u64 inline_len = actual_end - start; |
| u64 aligned_end = ALIGN(end, fs_info->sectorsize); |
| u64 data_len = inline_len; |
| int ret; |
| struct btrfs_path *path; |
| int extent_inserted = 0; |
| u32 extent_item_size; |
| |
| if (compressed_size) |
| data_len = compressed_size; |
| |
| if (start > 0 || |
| actual_end > fs_info->sectorsize || |
| data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) || |
| (!compressed_size && |
| (actual_end & (fs_info->sectorsize - 1)) == 0) || |
| end + 1 < isize || |
| data_len > fs_info->max_inline) { |
| return 1; |
| } |
| |
| path = btrfs_alloc_path(); |
| if (!path) |
| return -ENOMEM; |
| |
| trans = btrfs_join_transaction(root); |
| if (IS_ERR(trans)) { |
| btrfs_free_path(path); |
| return PTR_ERR(trans); |
| } |
| trans->block_rsv = &fs_info->delalloc_block_rsv; |
| |
| if (compressed_size && compressed_pages) |
| extent_item_size = btrfs_file_extent_calc_inline_size( |
| compressed_size); |
| else |
| extent_item_size = btrfs_file_extent_calc_inline_size( |
| inline_len); |
| |
| ret = __btrfs_drop_extents(trans, root, inode, path, |
| start, aligned_end, NULL, |
| 1, 1, extent_item_size, &extent_inserted); |
| if (ret) { |
| btrfs_abort_transaction(trans, ret); |
| goto out; |
| } |
| |
| if (isize > actual_end) |
| inline_len = min_t(u64, isize, actual_end); |
| ret = insert_inline_extent(trans, path, extent_inserted, |
| root, inode, start, |
| inline_len, compressed_size, |
| compress_type, compressed_pages); |
| if (ret && ret != -ENOSPC) { |
| btrfs_abort_transaction(trans, ret); |
| goto out; |
| } else if (ret == -ENOSPC) { |
| ret = 1; |
| goto out; |
| } |
| |
| set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); |
| btrfs_delalloc_release_metadata(BTRFS_I(inode), end + 1 - start); |
| btrfs_drop_extent_cache(BTRFS_I(inode), start, aligned_end - 1, 0); |
| out: |
| /* |
| * Don't forget to free the reserved space, as for inlined extent |
| * it won't count as data extent, free them directly here. |
| * And at reserve time, it's always aligned to page size, so |
| * just free one page here. |
| */ |
| btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE); |
| btrfs_free_path(path); |
| btrfs_end_transaction(trans); |
| return ret; |
| } |
| |
/*
 * One extent produced by the async compression phase, queued on
 * async_cow->extents until submit_compressed_extents() writes it out.
 */
struct async_extent {
	u64 start;			/* file offset of the extent */
	u64 ram_size;			/* uncompressed length */
	u64 compressed_size;		/* compressed length (0 if uncompressed) */
	struct page **pages;		/* compressed pages, NULL for uncompressed */
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;		/* link on async_cow->extents */
};

/*
 * Work item describing one delalloc range handed to the async-cow
 * workqueue; holds the extents produced by compress_file_range().
 */
struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;	/* page writepage already locked */
	u64 start;
	u64 end;
	struct list_head extents;	/* list of struct async_extent */
	struct btrfs_work work;
};
| |
| static noinline int add_async_extent(struct async_cow *cow, |
| u64 start, u64 ram_size, |
| u64 compressed_size, |
| struct page **pages, |
| unsigned long nr_pages, |
| int compress_type) |
| { |
| struct async_extent *async_extent; |
| |
| async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS); |
| BUG_ON(!async_extent); /* -ENOMEM */ |
| async_extent->start = start; |
| async_extent->ram_size = ram_size; |
| async_extent->compressed_size = compressed_size; |
| async_extent->pages = pages; |
| async_extent->nr_pages = nr_pages; |
| async_extent->compress_type = compress_type; |
| list_add_tail(&async_extent->list, &cow->extents); |
| return 0; |
| } |
| |
| static inline int inode_need_compress(struct inode *inode, u64 start, u64 end) |
| { |
| struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
| |
| /* force compress */ |
| if (btrfs_test_opt(fs_info, FORCE_COMPRESS)) |
| return 1; |
| /* defrag ioctl */ |
| if (BTRFS_I(inode)->defrag_compress) |
| return 1; |
| /* bad compression ratios */ |
| if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) |
| return 0; |
| if (btrfs_test_opt(fs_info, COMPRESS) || |
| BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS || |
| BTRFS_I(inode)->prop_compress) |
| return btrfs_compress_heuristic(inode, start, end); |
| return 0; |
| } |
| |
| static inline void inode_should_defrag(struct btrfs_inode *inode, |
| u64 start, u64 end, u64 num_bytes, u64 small_write) |
| { |
| /* If this is a small write inside eof, kick off a defrag */ |
| if (num_bytes < small_write && |
| (start > 0 || end + 1 < inode->disk_i_size)) |
| btrfs_add_inode_defrag(NULL, inode); |
| } |
| |
| /* |
| * we create compressed extents in two phases. The first |
| * phase compresses a range of pages that have already been |
| * locked (both pages and state bits are locked). |
| * |
| * This is done inside an ordered work queue, and the compression |
| * is spread across many cpus. The actual IO submission is step |
| * two, and the ordered work queue takes care of making sure that |
| * happens in the same order things were put onto the queue by |
| * writepages and friends. |
| * |
| * If this code finds it can't get good compression, it puts an |
| * entry onto the work queue to write the uncompressed bytes. This |
| * makes sure that both compressed inodes and uncompressed inodes |
| * are written in the same order that the flusher thread sent them |
| * down. |
| */ |
| static noinline void compress_file_range(struct inode *inode, |
| struct page *locked_page, |
| u64 start, u64 end, |
| struct async_cow *async_cow, |
| int *num_added) |
| { |
| struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| u64 num_bytes; |
| u64 blocksize = fs_info->sectorsize; |
| u64 actual_end; |
| u64 isize = i_size_read(inode); |
| int ret = 0; |
| struct page **pages = NULL; |
| unsigned long nr_pages; |
| unsigned long total_compressed = 0; |
| unsigned long total_in = 0; |
| int i; |
| int will_compress; |
| int compress_type = fs_info->compress_type; |
| int redirty = 0; |
| |
| inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1, |
| SZ_16K); |
| |
| actual_end = min_t(u64, isize, end + 1); |
| again: |
| will_compress = 0; |
| nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1; |
| BUILD_BUG_ON((BTRFS_MAX_COMPRESSED % PAGE_SIZE) != 0); |
| nr_pages = min_t(unsigned long, nr_pages, |
| BTRFS_MAX_COMPRESSED / PAGE_SIZE); |
| |
| /* |
| * we don't want to send crud past the end of i_size through |
| * compression, that's just a waste of CPU time. So, if the |
| * end of the file is before the start of our current |
| * requested range of bytes, we bail out to the uncompressed |
| * cleanup code that can deal with all of this. |
| * |
| * It isn't really the fastest way to fix things, but this is a |
| * very uncommon corner. |
| */ |
| if (actual_end <= start) |
| goto cleanup_and_bail_uncompressed; |
| |
| total_compressed = actual_end - start; |
| |
| /* |
| * skip compression for a small file range(<=blocksize) that |
| * isn't an inline extent, since it doesn't save disk space at all. |
| */ |
| if (total_compressed <= blocksize && |
| (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) |
| goto cleanup_and_bail_uncompressed; |
| |
| total_compressed = min_t(unsigned long, total_compressed, |
| BTRFS_MAX_UNCOMPRESSED); |
| num_bytes = ALIGN(end - start + 1, blocksize); |
| num_bytes = max(blocksize, num_bytes); |
| total_in = 0; |
| ret = 0; |
| |
| /* |
| * we do compression for mount -o compress and when the |
| * inode has not been flagged as nocompress. This flag can |
| * change at any time if we discover bad compression ratios. |
| */ |
| if (inode_need_compress(inode, start, end)) { |
| WARN_ON(pages); |
| pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); |
| if (!pages) { |
| /* just bail out to the uncompressed code */ |
| goto cont; |
| } |
| |
| if (BTRFS_I(inode)->defrag_compress) |
| compress_type = BTRFS_I(inode)->defrag_compress; |
| else if (BTRFS_I(inode)->prop_compress) |
| compress_type = BTRFS_I(inode)->prop_compress; |
| |
| /* |
| * we need to call clear_page_dirty_for_io on each |
| * page in the range. Otherwise applications with the file |
| * mmap'd can wander in and change the page contents while |
| * we are compressing them. |
| * |
| * If the compression fails for any reason, we set the pages |
| * dirty again later on. |
| */ |
| extent_range_clear_dirty_for_io(inode, start, end); |
| redirty = 1; |
| ret = btrfs_compress_pages(compress_type, |
| inode->i_mapping, start, |
| pages, |
| &nr_pages, |
| &total_in, |
| &total_compressed); |
| |
| if (!ret) { |
| unsigned long offset = total_compressed & |
| (PAGE_SIZE - 1); |
| struct page *page = pages[nr_pages - 1]; |
| char *kaddr; |
| |
| /* zero the tail end of the last page, we might be |
| * sending it down to disk |
| */ |
| if (offset) { |
| kaddr = kmap_atomic(page); |
| memset(kaddr + offset, 0, |
| PAGE_SIZE - offset); |
| kunmap_atomic(kaddr); |
| } |
| will_compress = 1; |
| } |
| } |
| cont: |
| if (start == 0) { |
| /* lets try to make an inline extent */ |
| if (ret || total_in < (actual_end - start)) { |
| /* we didn't compress the entire range, try |
| * to make an uncompressed inline extent. |
| */ |
| ret = cow_file_range_inline(root, inode, start, end, |
| 0, BTRFS_COMPRESS_NONE, NULL); |
| } else { |
| /* try making a compressed inline extent */ |
| ret = cow_file_range_inline(root, inode, start, end, |
| total_compressed, |
| compress_type, pages); |
| } |
| if (ret <= 0) { |
| unsigned long clear_flags = EXTENT_DELALLOC | |
| EXTENT_DELALLOC_NEW | EXTENT_DEFRAG; |
| unsigned long page_error_op; |
| |
| clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0; |
| page_error_op = ret < 0 ? PAGE_SET_ERROR : 0; |
| |
| /* |
| * inline extent creation worked or returned error, |
| * we don't need to create any more async work items. |
| * Unlock and free up our temp pages. |
| */ |
| extent_clear_unlock_delalloc(inode, start, end, end, |
| NULL, clear_flags, |
| PAGE_UNLOCK | |
| PAGE_CLEAR_DIRTY | |
| PAGE_SET_WRITEBACK | |
| page_error_op | |
| PAGE_END_WRITEBACK); |
| if (ret == 0) |
| btrfs_free_reserved_data_space_noquota(inode, |
| start, |
| end - start + 1); |
| goto free_pages_out; |
| } |
| } |
| |
| if (will_compress) { |
| /* |
| * we aren't doing an inline extent round the compressed size |
| * up to a block size boundary so the allocator does sane |
| * things |
| */ |
| total_compressed = ALIGN(total_compressed, blocksize); |
| |
| /* |
| * one last check to make sure the compression is really a |
| * win, compare the page count read with the blocks on disk, |
| * compression must free at least one sector size |
| */ |
| total_in = ALIGN(total_in, PAGE_SIZE); |
| if (total_compressed + blocksize <= total_in) { |
| num_bytes = total_in; |
| *num_added += 1; |
| |
| /* |
| * The async work queues will take care of doing actual |
| * allocation on disk for these compressed pages, and |
| * will submit them to the elevator. |
| */ |
| add_async_extent(async_cow, start, num_bytes, |
| total_compressed, pages, nr_pages, |
| compress_type); |
| |
| if (start + num_bytes < end) { |
| start += num_bytes; |
| pages = NULL; |
| cond_resched(); |
| goto again; |
| } |
| return; |
| } |
| } |
| if (pages) { |
| /* |
| * the compression code ran but failed to make things smaller, |
| * free any pages it allocated and our page pointer array |
| */ |
| for (i = 0; i < nr_pages; i++) { |
| WARN_ON(pages[i]->mapping); |
| put_page(pages[i]); |
| } |
| kfree(pages); |
| pages = NULL; |
| total_compressed = 0; |
| nr_pages = 0; |
| |
| /* flag the file so we don't compress in the future */ |
| if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && |
| !(BTRFS_I(inode)->prop_compress)) { |
| BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS; |
| } |
| } |
| cleanup_and_bail_uncompressed: |
| /* |
| * No compression, but we still need to write the pages in the file |
| * we've been given so far. redirty the locked page if it corresponds |
| * to our extent and set things up for the async work queue to run |
| * cow_file_range to do the normal delalloc dance. |
| */ |
| if (page_offset(locked_page) >= start && |
| page_offset(locked_page) <= end) |
| __set_page_dirty_nobuffers(locked_page); |
| /* unlocked later on in the async handlers */ |
| |
| if (redirty) |
| extent_range_redirty_for_io(inode, start, end); |
| add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0, |
| BTRFS_COMPRESS_NONE); |
| *num_added += 1; |
| |
| return; |
| |
| free_pages_out: |
| for (i = 0; i < nr_pages; i++) { |
| WARN_ON(pages[i]->mapping); |
| put_page(pages[i]); |
| } |
| kfree(pages); |
| } |
| |
| static void free_async_extent_pages(struct async_extent *async_extent) |
| { |
| int i; |
| |
| if (!async_extent->pages) |
| return; |
| |
| for (i = 0; i < async_extent->nr_pages; i++) { |
| WARN_ON(async_extent->pages[i]->mapping); |
| put_page(async_extent->pages[i]); |
| } |
| kfree(async_extent->pages); |
| async_extent->nr_pages = 0; |
| async_extent->pages = NULL; |
| } |
| |
| /* |
| * phase two of compressed writeback. This is the ordered portion |
| * of the code, which only gets called in the order the work was |
| * queued. We walk all the async extents created by compress_file_range |
| * and send them down to the disk. |
| */ |
| static noinline void submit_compressed_extents(struct inode *inode, |
| struct async_cow *async_cow) |
| { |
| struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
| struct async_extent *async_extent; |
| u64 alloc_hint = 0; |
| struct btrfs_key ins; |
| struct extent_map *em; |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| struct extent_io_tree *io_tree; |
| int ret = 0; |
| |
| again: |
| while (!list_empty(&async_cow->extents)) { |
| async_extent = list_entry(async_cow->extents.next, |
| struct async_extent, list); |
| list_del(&async_extent->list); |
| |
| io_tree = &BTRFS_I(inode)->io_tree; |
| |
| retry: |
| /* did the compression code fall back to uncompressed IO? */ |
| if (!async_extent->pages) { |
| int page_started = 0; |
| unsigned long nr_written = 0; |
| |
| lock_extent(io_tree, async_extent->start, |
| async_extent->start + |
| async_extent->ram_size - 1); |
| |
| /* allocate blocks */ |
| ret = cow_file_range(inode, async_cow->locked_page, |
| async_extent->start, |
| async_extent->start + |
| async_extent->ram_size - 1, |
| async_extent->start + |
| async_extent->ram_size - 1, |
| &page_started, &nr_written, 0, |
| NULL); |
| |
| /* JDM XXX */ |
| |
| /* |
| * if page_started, cow_file_range inserted an |
| * inline extent and took care of all the unlocking |
| * and IO for us. Otherwise, we need to submit |
| * all those pages down to the drive. |
| */ |
| if (!page_started && !ret) |
| extent_write_locked_range(io_tree, |
| inode, async_extent->start, |
| async_extent->start + |
| async_extent->ram_size - 1, |
| btrfs_get_extent, |
| WB_SYNC_ALL); |
| else if (ret) |
| unlock_page(async_cow->locked_page); |
| kfree(async_extent); |
| cond_resched(); |
| continue; |
| } |
| |
| lock_extent(io_tree, async_extent->start, |
| async_extent->start + async_extent->ram_size - 1); |
| |
| ret = btrfs_reserve_extent(root, async_extent->ram_size, |
| async_extent->compressed_size, |
| async_extent->compressed_size, |
| 0, alloc_hint, &ins, 1, 1); |
| if (ret) { |
| free_async_extent_pages(async_extent); |
| |
| if (ret == -ENOSPC) { |
| unlock_extent(io_tree, async_extent->start, |
| async_extent->start + |
| async_extent->ram_size - 1); |
| |
| /* |
| * we need to redirty the pages if we decide to |
| * fallback to uncompressed IO, otherwise we |
| * will not submit these pages down to lower |
| * layers. |
| */ |
| extent_range_redirty_for_io(inode, |
| async_extent->start, |
| async_extent->start + |
| async_extent->ram_size - 1); |
| |
| goto retry; |
| } |
| goto out_free; |
| } |
| /* |
| * here we're doing allocation and writeback of the |
| * compressed pages |
| */ |
| em = create_io_em(inode, async_extent->start, |
| async_extent->ram_size, /* len */ |
| async_extent->start, /* orig_start */ |
| ins.objectid, /* block_start */ |
| ins.offset, /* block_len */ |
| ins.offset, /* orig_block_len */ |
| async_extent->ram_size, /* ram_bytes */ |
| async_extent->compress_type, |
| BTRFS_ORDERED_COMPRESSED); |
| if (IS_ERR(em)) |
| /* ret value is not necessary due to void function */ |
| goto out_free_reserve; |
| free_extent_map(em); |
| |
| ret = btrfs_add_ordered_extent_compress(inode, |
| async_extent->start, |
| ins.objectid, |
| async_extent->ram_size, |
| ins.offset, |
| BTRFS_ORDERED_COMPRESSED, |
| async_extent->compress_type); |
| if (ret) { |
| btrfs_drop_extent_cache(BTRFS_I(inode), |
| async_extent->start, |
| async_extent->start + |
| async_extent->ram_size - 1, 0); |
| goto out_free_reserve; |
| } |
| btrfs_dec_block_group_reservations(fs_info, ins.objectid); |
| |
| /* |
| * clear dirty, set writeback and unlock the pages. |
| */ |
| extent_clear_unlock_delalloc(inode, async_extent->start, |
| async_extent->start + |
| async_extent->ram_size - 1, |
| async_extent->start + |
| async_extent->ram_size - 1, |
| NULL, EXTENT_LOCKED | EXTENT_DELALLOC, |
| PAGE_UNLOCK | PAGE_CLEAR_DIRTY | |
| PAGE_SET_WRITEBACK); |
| if (btrfs_submit_compressed_write(inode, |
| async_extent->start, |
| async_extent->ram_size, |
| ins.objectid, |
| ins.offset, async_extent->pages, |
| async_extent->nr_pages)) { |
| struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; |
| struct page *p = async_extent->pages[0]; |
| const u64 start = async_extent->start; |
| const u64 end = start + async_extent->ram_size - 1; |
| |
| p->mapping = inode->i_mapping; |
| tree->ops->writepage_end_io_hook(p, start, end, |
| NULL, 0); |
| p->mapping = NULL; |
| extent_clear_unlock_delalloc(inode, start, end, end, |
| NULL, 0, |
| PAGE_END_WRITEBACK | |
| PAGE_SET_ERROR); |
| free_async_extent_pages(async_extent); |
| } |
| alloc_hint = ins.objectid + ins.offset; |
| kfree(async_extent); |
| cond_resched(); |
| } |
| return; |
| out_free_reserve: |
| btrfs_dec_block_group_reservations(fs_info, ins.objectid); |
| btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); |
| out_free: |
| extent_clear_unlock_delalloc(inode, async_extent->start, |
| async_extent->start + |
| async_extent->ram_size - 1, |
| async_extent->start + |
| async_extent->ram_size - 1, |
| NULL, EXTENT_LOCKED | EXTENT_DELALLOC | |
| EXTENT_DELALLOC_NEW | |
| EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING, |
| PAGE_UNLOCK | PAGE_CLEAR_DIRTY | |
| PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK | |
| PAGE_SET_ERROR); |
| free_async_extent_pages(async_extent); |
| kfree(async_extent); |
| goto again; |
| } |
| |
| static u64 get_extent_allocation_hint(struct inode *inode, u64 start, |
| u64 num_bytes) |
| { |
| struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
| struct extent_map *em; |
| u64 alloc_hint = 0; |
| |
| read_lock(&em_tree->lock); |
| em = search_extent_mapping(em_tree, start, num_bytes); |
| if (em) { |
| /* |
| * if block start isn't an actual block number then find the |
| * first block in this inode and use that as a hint. If that |
| * block is also bogus then just don't worry about it. |
| */ |
| if (em->block_start >= EXTENT_MAP_LAST_BYTE) { |
| free_extent_map(em); |
| em = search_extent_mapping(em_tree, 0, 0); |
| if (em && em->block_start < EXTENT_MAP_LAST_BYTE) |
| alloc_hint = em->block_start; |
| if (em) |
| free_extent_map(em); |
| } else { |
| alloc_hint = em->block_start; |
| free_extent_map(em); |
| } |
| } |
| read_unlock(&em_tree->lock); |
| |
| return alloc_hint; |
| } |
| |
| /* |
| * when extent_io.c finds a delayed allocation range in the file, |
| * the call backs end up in this code. The basic idea is to |
| * allocate extents on disk for the range, and create ordered data structs |
| * in ram to track those extents. |
| * |
| * locked_page is the page that writepage had locked already. We use |
| * it to make sure we don't do extra locks or unlocks. |
| * |
| * *page_started is set to one if we unlock locked_page and do everything |
| * required to start IO on it. It may be clean and already done with |
| * IO when we return. |
| */ |
| static noinline int cow_file_range(struct inode *inode, |
| struct page *locked_page, |
| u64 start, u64 end, u64 delalloc_end, |
| int *page_started, unsigned long *nr_written, |
| int unlock, struct btrfs_dedupe_hash *hash) |
| { |
| struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| u64 alloc_hint = 0; |
| u64 num_bytes; |
| unsigned long ram_size; |
| u64 disk_num_bytes; |
| u64 cur_alloc_size = 0; |
| u64 blocksize = fs_info->sectorsize; |
| struct btrfs_key ins; |
| struct extent_map *em; |
| unsigned clear_bits; |
| unsigned long page_ops; |
| bool extent_reserved = false; |
| int ret = 0; |
| |
| if (btrfs_is_free_space_inode(BTRFS_I(inode))) { |
| WARN_ON_ONCE(1); |
| ret = -EINVAL; |
| goto out_unlock; |
| } |
| |
| num_bytes = ALIGN(end - start + 1, blocksize); |
| num_bytes = max(blocksize, num_bytes); |
| disk_num_bytes = num_bytes; |
| |
| inode_should_defrag(BTRFS_I(inode), start, end, num_bytes, SZ_64K); |
| |
| if (start == 0) { |
| /* lets try to make an inline extent */ |
| ret = cow_file_range_inline(root, inode, start, end, 0, |
| BTRFS_COMPRESS_NONE, NULL); |
| if (ret == 0) { |
| extent_clear_unlock_delalloc(inode, start, end, |
| delalloc_end, NULL, |
| EXTENT_LOCKED | EXTENT_DELALLOC | |
| EXTENT_DELALLOC_NEW | |
| EXTENT_DEFRAG, PAGE_UNLOCK | |
| PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK | |
| PAGE_END_WRITEBACK); |
| btrfs_free_reserved_data_space_noquota(inode, start, |
| end - start + 1); |
| *nr_written = *nr_written + |
| (end - start + PAGE_SIZE) / PAGE_SIZE; |
| *page_started = 1; |
| goto out; |
| } else if (ret < 0) { |
| goto out_unlock; |
| } |
| } |
| |
| BUG_ON(disk_num_bytes > |
| btrfs_super_total_bytes(fs_info->super_copy)); |
| |
| alloc_hint = get_extent_allocation_hint(inode, start, num_bytes); |
| btrfs_drop_extent_cache(BTRFS_I(inode), start, |
| start + num_bytes - 1, 0); |
| |
| while (disk_num_bytes > 0) { |
| cur_alloc_size = disk_num_bytes; |
| ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size, |
| fs_info->sectorsize, 0, alloc_hint, |
| &ins, 1, 1); |
| if (ret < 0) |
| goto out_unlock; |
| cur_alloc_size = ins.offset; |
| extent_reserved = true; |
| |
| ram_size = ins.offset; |
| em = create_io_em(inode, start, ins.offset, /* len */ |
| start, /* orig_start */ |
| ins.objectid, /* block_start */ |
| ins.offset, /* block_len */ |
| ins.offset, /* orig_block_len */ |
| ram_size, /* ram_bytes */ |
| BTRFS_COMPRESS_NONE, /* compress_type */ |
| BTRFS_ORDERED_REGULAR /* type */); |
| if (IS_ERR(em)) |
| goto out_reserve; |
| free_extent_map(em); |
| |
| ret = btrfs_add_ordered_extent(inode, start, ins.objectid, |
| ram_size, cur_alloc_size, 0); |
| if (ret) |
| goto out_drop_extent_cache; |
| |
| if (root->root_key.objectid == |
| BTRFS_DATA_RELOC_TREE_OBJECTID) { |
| ret = btrfs_reloc_clone_csums(inode, start, |
| cur_alloc_size); |
| /* |
| * Only drop cache here, and process as normal. |
| * |
| * We must not allow extent_clear_unlock_delalloc() |
| * at out_unlock label to free meta of this ordered |
| * extent, as its meta should be freed by |
| * btrfs_finish_ordered_io(). |
| * |
| * So we must continue until @start is increased to |
| * skip current ordered extent. |
| */ |
| if (ret) |
| btrfs_drop_extent_cache(BTRFS_I(inode), start, |
| start + ram_size - 1, 0); |
| } |
| |
| btrfs_dec_block_group_reservations(fs_info, ins.objectid); |
| |
| /* we're not doing compressed IO, don't unlock the first |
| * page (which the caller expects to stay locked), don't |
| * clear any dirty bits and don't set any writeback bits |
| * |
| * Do set the Private2 bit so we know this page was properly |
| * setup for writepage |
| */ |
| page_ops = unlock ? PAGE_UNLOCK : 0; |
| page_ops |= PAGE_SET_PRIVATE2; |
| |
| extent_clear_unlock_delalloc(inode, start, |
| start + ram_size - 1, |
| delalloc_end, locked_page, |
| EXTENT_LOCKED | EXTENT_DELALLOC, |
| page_ops); |
| if (disk_num_bytes < cur_alloc_size) |
| disk_num_bytes = 0; |
| else |
| disk_num_bytes -= cur_alloc_size; |
| num_bytes -= cur_alloc_size; |
| alloc_hint = ins.objectid + ins.offset; |
| start += cur_alloc_size; |
| extent_reserved = false; |
| |
| /* |
| * btrfs_reloc_clone_csums() error, since start is increased |
| * extent_clear_unlock_delalloc() at out_unlock label won't |
| * free metadata of current ordered extent, we're OK to exit. |
| */ |
| if (ret) |
| goto out_unlock; |
| } |
| out: |
| return ret; |
| |
| out_drop_extent_cache: |
| btrfs_drop_extent_cache(BTRFS_I(inode), start, start + ram_size - 1, 0); |
| out_reserve: |
| btrfs_dec_block_group_reservations(fs_info, ins.objectid); |
| btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); |
| out_unlock: |
| clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | |
| EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV; |
| page_ops = PAGE_UNLOCK | PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK | |
| PAGE_END_WRITEBACK; |
| /* |
| * If we reserved an extent for our delalloc range (or a subrange) and |
| * failed to create the respective ordered extent, then it means that |
| * when we reserved the extent we decremented the extent's size from |
| * the data space_info's bytes_may_use counter and incremented the |
| * space_info's bytes_reserved counter by the same amount. We must make |
| * sure extent_clear_unlock_delalloc() does not try to decrement again |
| * the data space_info's bytes_may_use counter, therefore we do not pass |
| * it the flag EXTENT_CLEAR_DATA_RESV. |
| */ |
| if (extent_reserved) { |
| extent_clear_unlock_delalloc(inode, start, |
| start + cur_alloc_size, |
| start + cur_alloc_size, |
| locked_page, |
| clear_bits, |
| page_ops); |
| start += cur_alloc_size; |
| if (start >= end) |
| goto out; |
| } |
| extent_clear_unlock_delalloc(inode, start, end, delalloc_end, |
| locked_page, |
| clear_bits | EXTENT_CLEAR_DATA_RESV, |
| page_ops); |
| goto out; |
| } |
| |
| /* |
| * work queue call back to started compression on a file and pages |
| */ |
| static noinline void async_cow_start(struct btrfs_work *work) |
| { |
| struct async_cow *async_cow; |
| int num_added = 0; |
| async_cow = container_of(work, struct async_cow, work); |
| |
| compress_file_range(async_cow->inode, async_cow->locked_page, |
| async_cow->start, async_cow->end, async_cow, |
| &num_added); |
| if (num_added == 0) { |
| btrfs_add_delayed_iput(async_cow->inode); |
| async_cow->inode = NULL; |
| } |
| } |
| |
| /* |
| * work queue call back to submit previously compressed pages |
| */ |
| static noinline void async_cow_submit(struct btrfs_work *work) |
| { |
| struct btrfs_fs_info *fs_info; |
| struct async_cow *async_cow; |
| struct btrfs_root *root; |
| unsigned long nr_pages; |
| |
| async_cow = container_of(work, struct async_cow, work); |
| |
| root = async_cow->root; |
| fs_info = root->fs_info; |
| nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >> |
| PAGE_SHIFT; |
| |
| /* |
| * atomic_sub_return implies a barrier for waitqueue_active |
| */ |
| if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) < |
| 5 * SZ_1M && |
| waitqueue_active(&fs_info->async_submit_wait)) |
| wake_up(&fs_info->async_submit_wait); |
| |
| if (async_cow->inode) |
| submit_compressed_extents(async_cow->inode, async_cow); |
| } |
| |
| static noinline void async_cow_free(struct btrfs_work *work) |
| { |
| struct async_cow *async_cow; |
| async_cow = container_of(work, struct async_cow, work); |
| if (async_cow->inode) |
| btrfs_add_delayed_iput(async_cow->inode); |
| kfree(async_cow); |
| } |
| |
/*
 * Split the delalloc range [start, end] into chunks and queue each one
 * on the delalloc work queue for async compression + submission.
 *
 * Each chunk gets its own async_cow descriptor holding an inode
 * reference (igrab); the reference is released by the worker callbacks.
 * Always returns 0 and reports progress via *page_started/*nr_written.
 */
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;

	/* Unlock the extent range; the async workers re-take what they need */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		BUG_ON(!async_cow); /* -ENOMEM */
		async_cow->inode = igrab(inode);
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		/*
		 * Inode opted out of compression (and no mount-wide force):
		 * one chunk covers the whole range.  Otherwise cap chunks at
		 * 512K so compression work is spread across workers.
		 */
		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
		    !btrfs_test_opt(fs_info, FORCE_COMPRESS))
			cur_end = end;
		else
			cur_end = min(end, start + SZ_512K - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		btrfs_init_work(&async_cow->work,
				btrfs_delalloc_helper,
				async_cow_start, async_cow_submit,
				async_cow_free);

		nr_pages = (cur_end - start + PAGE_SIZE) >>
			PAGE_SHIFT;
		atomic_add(nr_pages, &fs_info->async_delalloc_pages);

		btrfs_queue_work(fs_info->delalloc_workers, &async_cow->work);

		/*
		 * If someone is draining async delalloc (e.g. before a
		 * sync), wait until all queued pages have been processed.
		 */
		while (atomic_read(&fs_info->async_submit_draining) &&
		       atomic_read(&fs_info->async_delalloc_pages)) {
			wait_event(fs_info->async_submit_wait,
				   (atomic_read(&fs_info->async_delalloc_pages) ==
				    0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}
| |
| static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info, |
| u64 bytenr, u64 num_bytes) |
| { |
| int ret; |
| struct btrfs_ordered_sum *sums; |
| LIST_HEAD(list); |
| |
| ret = btrfs_lookup_csums_range(fs_info->csum_root, bytenr, |
| bytenr + num_bytes - 1, &list, 0); |
| if (ret == 0 && list_empty(&list)) |
| return 0; |
| |
| while (!list_empty(&list)) { |
| sums = list_entry(list.next, struct btrfs_ordered_sum, list); |
| list_del(&sums->list); |
| kfree(sums); |
| } |
| return 1; |
| } |
| |
| /* |
| * when nowcow writeback call back. This checks for snapshots or COW copies |
| * of the extents that exist in the file, and COWs the file as required. |
| * |
| * If no cow copies or snapshots exist, we write directly to the existing |
| * blocks on disk |
| */ |
| static noinline int run_delalloc_nocow(struct inode *inode, |
| struct page *locked_page, |
| u64 start, u64 end, int *page_started, int force, |
| unsigned long *nr_written) |
| { |
| struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| struct extent_buffer *leaf; |
| struct btrfs_path *path; |
| struct btrfs_file_extent_item *fi; |
| struct btrfs_key found_key; |
| struct extent_map *em; |
| u64 cow_start; |
| u64 cur_offset; |
| u64 extent_end; |
| u64 extent_offset; |
| u64 disk_bytenr; |
| u64 num_bytes; |
| u64 disk_num_bytes; |
| u64 ram_bytes; |
| int extent_type; |
| int ret, err; |
| int type; |
| int nocow; |
| int check_prev = 1; |
| bool nolock; |
| u64 ino = btrfs_ino(BTRFS_I(inode)); |
| |
| path = btrfs_alloc_path(); |
| if (!path) { |
| extent_clear_unlock_delalloc(inode, start, end, end, |
| locked_page, |
| EXTENT_LOCKED | EXTENT_DELALLOC | |
| EXTENT_DO_ACCOUNTING | |
| EXTENT_DEFRAG, PAGE_UNLOCK | |
| PAGE_CLEAR_DIRTY | |
| PAGE_SET_WRITEBACK | |
| PAGE_END_WRITEBACK); |
| return -ENOMEM; |
| } |
| |
| nolock = btrfs_is_free_space_inode(BTRFS_I(inode)); |
| |
| cow_start = (u64)-1; |
| cur_offset = start; |
| while (1) { |
| ret = btrfs_lookup_file_extent(NULL, root, path, ino, |
| cur_offset, 0); |
| if (ret < 0) |
| goto error; |
| if (ret > 0 && path->slots[0] > 0 && check_prev) { |
| leaf = path->nodes[0]; |
| btrfs_item_key_to_cpu(leaf, &found_key, |
| path->slots[0] - 1); |
| if (found_key.objectid == ino && |
| found_key.type == BTRFS_EXTENT_DATA_KEY) |
| path->slots[0]--; |
| } |
| check_prev = 0; |
| next_slot: |
| leaf = path->nodes[0]; |
| if (path->slots[0] >= btrfs_header_nritems(leaf)) { |
| ret = btrfs_next_leaf(root, path); |
| if (ret < 0) |
| goto error; |
| if (ret > 0) |
| break; |
| leaf = path->nodes[0]; |
| } |
| |
| nocow = 0; |
| disk_bytenr = 0; |
| num_bytes = 0; |
| btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
| |
| if (found_key.objectid > ino) |
| break; |
| if (WARN_ON_ONCE(found_key.objectid < ino) || |
| found_key.type < BTRFS_EXTENT_DATA_KEY) { |
| path->slots[0]++; |
| goto next_slot; |
| } |
| if (found_key.type > BTRFS_EXTENT_DATA_KEY || |
| found_key.offset > end) |
| break; |
| |
| if (found_key.offset > cur_offset) { |
| extent_end = found_key.offset; |
| extent_type = 0; |
| goto out_check; |
| } |
| |
| fi = btrfs_item_ptr(leaf, path->slots[0], |
| struct btrfs_file_extent_item); |
| extent_type = btrfs_file_extent_type(leaf, fi); |
| |
| ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); |
| if (extent_type == BTRFS_FILE_EXTENT_REG || |
| extent_type == BTRFS_FILE_EXTENT_PREALLOC) { |
| disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); |
| extent_offset = btrfs_file_extent_offset(leaf, fi); |
| extent_end = found_key.offset + |
| btrfs_file_extent_num_bytes(leaf, fi); |
| disk_num_bytes = |
| btrfs_file_extent_disk_num_bytes(leaf, fi); |
| if (extent_end <= start) { |
| path->slots[0]++; |
| goto next_slot; |
| } |
| if (disk_bytenr == 0) |
| goto out_check; |
| if (btrfs_file_extent_compression(leaf, fi) || |
| btrfs_file_extent_encryption(leaf, fi) || |
| btrfs_file_extent_other_encoding(leaf, fi)) |
| goto out_check; |
| if (extent_type == BTRFS_FILE_EXTENT_REG && !force) |
| goto out_check; |
| if (btrfs_extent_readonly(fs_info, disk_bytenr)) |
| goto out_check; |
| if (btrfs_cross_ref_exist(root, ino, |
| found_key.offset - |
| extent_offset, disk_bytenr)) |
| goto out_check; |
| disk_bytenr += extent_offset; |
| disk_bytenr += cur_offset - found_key.offset; |
| num_bytes = min(end + 1, extent_end) - cur_offset; |
| /* |
| * if there are pending snapshots for this root, |
| * we fall into common COW way. |
| */ |
| if (!nolock) { |
| err = btrfs_start_write_no_snapshotting(root); |
| if (!err) |
| goto out_check; |
| } |
| /* |
| * force cow if csum exists in the range. |
| * this ensure that csum for a given extent are |
| * either valid or do not exist. |
| */ |
| if (csum_exist_in_range(fs_info, disk_bytenr, |
| num_bytes)) { |
| if (!nolock) |
| btrfs_end_write_no_snapshotting(root); |
| goto out_check; |
| } |
| if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) { |
| if (!nolock) |
| btrfs_end_write_no_snapshotting(root); |
| goto out_check; |
| } |
| nocow = 1; |
| } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { |
| extent_end = found_key.offset + |
| btrfs_file_extent_inline_len(leaf, |
| path->slots[0], fi); |
| extent_end = ALIGN(extent_end, |
| fs_info->sectorsize); |
| } else { |
| BUG_ON(1); |
| } |
| out_check: |
| if (extent_end <= start) { |
| path->slots[0]++; |
| if (!nolock && nocow) |
| btrfs_end_write_no_snapshotting(root); |
| if (nocow) |
| btrfs_dec_nocow_writers(fs_info, disk_bytenr); |
| goto next_slot; |
| } |
| if (!nocow) { |
| if (cow_start == (u64)-1) |
| cow_start = cur_offset; |
| cur_offset = extent_end; |
| if (cur_offset > end) |
| break; |
| path->slots[0]++; |
| goto next_slot; |
| } |
| |
| btrfs_release_path(path); |
| if (cow_start != (u64)-1) { |
| ret = cow_file_range(inode, locked_page, |
| cow_start, found_key.offset - 1, |
| end, page_started, nr_written, 1, |
| NULL); |
| if (ret) { |
| if (!nolock && nocow) |
| btrfs_end_write_no_snapshotting(root); |
| if (nocow) |
| btrfs_dec_nocow_writers(fs_info, |
| disk_bytenr); |
| goto error; |
| } |
| cow_start = (u64)-1; |
| } |
| |
| if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) { |
| u64 orig_start = found_key.offset - extent_offset; |
| |
| em = create_io_em(inode, cur_offset, num_bytes, |
| orig_start, |
| disk_bytenr, /* block_start */ |
| num_bytes, /* block_len */ |
| disk_num_bytes, /* orig_block_len */ |
| ram_bytes, BTRFS_COMPRESS_NONE, |
| BTRFS_ORDERED_PREALLOC); |
| if (IS_ERR(em)) { |
| if (!nolock && nocow) |
| btrfs_end_write_no_snapshotting(root); |
| if (nocow) |
| btrfs_dec_nocow_writers(fs_info, |
| disk_bytenr); |
| ret = PTR_ERR(em); |
| goto error; |
| } |
| free_extent_map(em); |
| } |
| |
| if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) { |
| type = BTRFS_ORDERED_PREALLOC; |
| } else { |
| type = BTRFS_ORDERED_NOCOW; |
| } |
| |
| ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr, |
| num_bytes, num_bytes, type); |
| if (nocow) |
| btrfs_dec_nocow_writers(fs_info, disk_bytenr); |
| BUG_ON(ret); /* -ENOMEM */ |
| |
| if (root->root_key.objectid == |
| BTRFS_DATA_RELOC_TREE_OBJECTID) |
| /* |
| * Error handled later, as we must prevent |
| * extent_clear_unlock_delalloc() in error handler |
| * from freeing metadata of created ordered extent. |
| */ |
| ret = btrfs_reloc_clone_csums(inode, cur_offset, |
| num_bytes); |
| |
| extent_clear_unlock_delalloc(inode, cur_offset, |
| cur_offset + num_bytes - 1, end, |
| locked_page, EXTENT_LOCKED | |
| EXTENT_DELALLOC | |
| EXTENT_CLEAR_DATA_RESV, |
| PAGE_UNLOCK | PAGE_SET_PRIVATE2); |
| |
| if (!nolock && nocow) |
| btrfs_end_write_no_snapshotting(root); |
| cur_offset = extent_end; |
| |
| /* |
| * btrfs_reloc_clone_csums() error, now we're OK to call error |
| * handler, as metadata for created ordered extent will only |
| * be freed by btrfs_finish_ordered_io(). |
| */ |
| if (ret) |
| goto error; |
| if (cur_offset > end) |
| break; |
| } |
| btrfs_release_path(path); |
| |
| if (cur_offset <= end && cow_start == (u64)-1) { |
| cow_start = cur_offset; |
| cur_offset = end; |
| } |
| |
| if (cow_start != (u64)-1) { |
| ret = cow_file_range(inode, locked_page, cow_start, end, end, |
| page_started, nr_written, 1, NULL); |
| if (ret) |
| goto error; |
| } |
| |
| error: |
| if (ret && cur_offset < end) |
| extent_clear_unlock_delalloc(inode, cur_offset, end, end, |
| locked_page, EXTENT_LOCKED | |
| EXTENT_DELALLOC | EXTENT_DEFRAG | |
| EXTENT_DO_ACCOUNTING, PAGE_UNLOCK | |
| PAGE_CLEAR_DIRTY | |
| PAGE_SET_WRITEBACK | |
| PAGE_END_WRITEBACK); |
| btrfs_free_path(path); |
| return ret; |
| } |
| |
| static inline int need_force_cow(struct inode *inode, u64 start, u64 end) |
| { |
| |
| if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && |
| !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) |
| return 0; |
| |
| /* |
| * @defrag_bytes is a hint value, no spinlock held here, |
| * if is not zero, it means the file is defragging. |
| * Force cow if given extent needs to be defragged. |
| */ |
| if (BTRFS_I(inode)->defrag_bytes && |
| test_range_bit(&BTRFS_I(inode)->io_tree, start, end, |
| EXTENT_DEFRAG, 0, NULL)) |
| return 1; |
| |
| return 0; |
| } |
| |
| /* |
| * extent_io.c call back to do delayed allocation processing |
| */ |
| static int run_delalloc_range(void *private_data, struct page *locked_page, |
| u64 start, u64 end, int *page_started, |
| unsigned long *nr_written) |
| { |
| struct inode *inode = private_data; |
| int ret; |
| int force_cow = need_force_cow(inode, start, end); |
| |
| if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) { |
| ret = run_delalloc_nocow(inode, locked_page, start, end, |
| page_started, 1, nr_written); |
| } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) { |
| ret = run_delalloc_nocow(inode, locked_page, start, end, |
| page_started, 0, nr_written); |
| } else if (!inode_need_compress(inode, start, end)) { |
| ret = cow_file_range(inode, locked_page, start, end, end, |
| page_started, nr_written, 1, NULL); |
| } else { |
| set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, |
| &BTRFS_I(inode)->runtime_flags); |
| ret = cow_file_range_async(inode, locked_page, start, end, |
| page_started, nr_written); |
| } |
| if (ret) |
| btrfs_cleanup_ordered_extents(inode, start, end - start + 1); |
| return ret; |
| } |
| |
| static void btrfs_split_extent_hook(void *private_data, |
| struct extent_state *orig, u64 split) |
| { |
| struct inode *inode = private_data; |
| u64 size; |
| |
| /* not delalloc, ignore it */ |
| if (!(orig->state & EXTENT_DELALLOC)) |
| return; |
| |
| size = orig->end - orig->start + 1; |
| if (size > BTRFS_MAX_EXTENT_SIZE) { |
| u32 num_extents; |
| u64 new_size; |
| |
| /* |
| * See the explanation in btrfs_merge_extent_hook, the same |
| * applies here, just in reverse. |
| */ |
| new_size = orig->end - split + 1; |
| num_extents = count_max_extents(new_size); |
| new_size = split - orig->start; |
| num_extents += count_max_extents(new_size); |
| if (count_max_extents(size) >= num_extents) |
| return; |
| } |
| |
| spin_lock(&BTRFS_I(inode)->lock); |
| BTRFS_I(inode)->outstanding_extents++; |
| spin_unlock(&BTRFS_I(inode)->lock); |
| } |
| |
| /* |
| * extent_io.c merge_extent_hook, used to track merged delayed allocation |
| * extents so we can keep track of new extents that are just merged onto old |
| * extents, such as when we are doing sequential writes, so we can properly |
| * account for the metadata space we'll need. |
| */ |
| static void btrfs_merge_extent_hook(void *private_data, |
| struct extent_state *new, |
| struct extent_state *other) |
| { |
| struct inode *inode = private_data; |
| u64 new_size, old_size; |
| u32 num_extents; |
| |
| /* not delalloc, ignore it */ |
| if (!(other->state & EXTENT_DELALLOC)) |
| return; |
| |
| if (new->start > other->start) |
| new_size = new->end - other->start + 1; |
| else |
| new_size = other->end - new->start + 1; |
| |
| /* we're not bigger than the max, unreserve the space and go */ |
| if (new_size <= BTRFS_MAX_EXTENT_SIZE) { |
| spin_lock(&BTRFS_I(inode)->lock); |
| BTRFS_I(inode)->outstanding_extents--; |
| spin_unlock(&BTRFS_I(inode)->lock); |
| return; |
| } |
| |
| /* |
| * We have to add up either side to figure out how many extents were |
| * accounted for before we merged into one big extent. If the number of |
| * extents we accounted for is <= the amount we need for the new range |
| * then we can return, otherwise drop. Think of it like this |
| * |
| * [ 4k][MAX_SIZE] |
| * |
| * So we've grown the extent by a MAX_SIZE extent, this would mean we |
| * need 2 outstanding extents, on one side we have 1 and the other side |
| * we have 1 so they are == and we can return. But in this case |
| * |
| * [MAX_SIZE+4k][MAX_SIZE+4k] |
| * |
| * Each range on their own accounts for 2 extents, but merged together |
| * they are only 3 extents worth of accounting, so we need to drop in |
| * this case. |
| */ |
| old_size = other->end - other->start + 1; |
| num_extents = count_max_extents(old_size); |
| old_size = new->end - new->start + 1; |
| num_extents += count_max_extents(old_size); |
| if (count_max_extents(new_size) >= num_extents) |
| return; |
| |
| spin_lock(&BTRFS_I(inode)->lock); |
| BTRFS_I(inode)->outstanding_extents--; |
| spin_unlock(&BTRFS_I(inode)->lock); |
| } |
| |
| static void btrfs_add_delalloc_inodes(struct btrfs_root *root, |
| struct inode *inode) |
| { |
| struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
| |
| spin_lock(&root->delalloc_lock); |
| if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) { |
| list_add_tail(&BTRFS_I(inode)->delalloc_inodes, |
| &root->delalloc_inodes); |
| set_bit(BTRFS_INODE_IN_DELALLOC_LIST, |
| &BTRFS_I(inode)->runtime_flags); |
| root->nr_delalloc_inodes++; |
| if (root->nr_delalloc_inodes == 1) { |
| spin_lock(&fs_info->delalloc_root_lock); |
| BUG_ON(!list_empty(&root->delalloc_root)); |
| list_add_tail(&root->delalloc_root, |
| &fs_info->delalloc_roots); |
| spin_unlock(&fs_info->delalloc_root_lock); |
| } |
| } |
| spin_unlock(&root->delalloc_lock); |
| } |
| |
| static void btrfs_del_delalloc_inode(struct btrfs_root *root, |
| struct btrfs_inode *inode) |
| { |
| struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); |
| |
| spin_lock(&root->delalloc_lock); |
| if (!list_empty(&inode->delalloc_inodes)) { |
| list_del_init(&inode->delalloc_inodes); |
| clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, |
| &inode->runtime_flags); |
| root->nr_delalloc_inodes--; |
| if (!root->nr_delalloc_inodes) { |
| spin_lock(&fs_info->delalloc_root_lock); |
| BUG_ON(list_empty(&root->delalloc_root)); |
| list_del_init(&root->delalloc_root); |
| spin_unlock(&fs_info->delalloc_root_lock); |
| } |
| } |
| spin_unlock(&root->delalloc_lock); |
| } |
| |
| /* |
| * extent_io.c set_bit_hook, used to track delayed allocation |
| * bytes in this file, and to maintain the list of inodes that |
| * have pending delalloc work to be done. |
| */ |
| static void btrfs_set_bit_hook(void *private_data, |
| struct extent_state *state, unsigned *bits) |
| { |
| struct inode *inode = private_data; |
| |
| struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
| |
| if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC)) |
| WARN_ON(1); |
| /* |
| * set_bit and clear bit hooks normally require _irqsave/restore |
| * but in this case, we are only testing for the DELALLOC |
| * bit, which is only set or cleared with irqs on |
| */ |
| if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| u64 len = state->end + 1 - state->start; |
| bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode)); |
| |
| if (*bits & EXTENT_FIRST_DELALLOC) { |
| *bits &= ~EXTENT_FIRST_DELALLOC; |
| } else { |
| spin_lock(&BTRFS_I(inode)->lock); |
| BTRFS_I(inode)->outstanding_extents++; |
| spin_unlock(&BTRFS_I(inode)->lock); |
| } |
| |
| /* For sanity tests */ |
| if (btrfs_is_testing(fs_info)) |
| return; |
| |
| percpu_counter_add_batch(&fs_info->delalloc_bytes, len, |
| fs_info->delalloc_batch); |
| spin_lock(&BTRFS_I(inode)->lock); |
| BTRFS_I(inode)->delalloc_bytes += len; |
| if (*bits & EXTENT_DEFRAG) |
| BTRFS_I(inode)->defrag_bytes += len; |
| if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST, |
| &BTRFS_I(inode)->runtime_flags)) |
| btrfs_add_delalloc_inodes(root, inode); |
| spin_unlock(&BTRFS_I(inode)->lock); |
| } |
| |
| if (!(state->state & EXTENT_DELALLOC_NEW) && |
| (*bits & EXTENT_DELALLOC_NEW)) { |
| spin_lock(&BTRFS_I(inode)->lock); |
| BTRFS_I(inode)->new_delalloc_bytes += state->end + 1 - |
| state->start; |
| spin_unlock(&BTRFS_I(inode)->lock); |
| } |
| } |
| |
| /* |
| * extent_io.c clear_bit_hook, see set_bit_hook for why |
| */ |
| static void btrfs_clear_bit_hook(void *private_data, |
| struct extent_state *state, |
| unsigned *bits) |
| { |
| struct btrfs_inode *inode = BTRFS_I((struct inode *)private_data); |
| struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); |
| u64 len = state->end + 1 - state->start; |
| u32 num_extents = count_max_extents(len); |
| |
| if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) { |
| spin_lock(&inode->lock); |
| inode->defrag_bytes -= len; |
| spin_unlock(&inode->lock); |
| } |
| |
| /* |
| * set_bit and clear bit hooks normally require _irqsave/restore |
| * but in this case, we are only testing for the DELALLOC |
| * bit, which is only set or cleared with irqs on |
| */ |
| if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { |
| struct btrfs_root *root = inode->root; |
| bool do_list = !btrfs_is_free_space_inode(inode); |
| |
| if (*bits & EXTENT_FIRST_DELALLOC) { |
| *bits &= ~EXTENT_FIRST_DELALLOC; |
| } else if (!(*bits & EXTENT_CLEAR_META_RESV)) { |
| spin_lock(&inode->lock); |
| inode->outstanding_extents -= num_extents; |
| spin_unlock(&inode->lock); |
| } |
| |
| /* |
| * We don't reserve metadata space for space cache inodes so we |
| * don't need to call dellalloc_release_metadata if there is an |
| * error. |
| */ |
| if (*bits & EXTENT_CLEAR_META_RESV && |
| root != fs_info->tree_root) |
| btrfs_delalloc_release_metadata(inode, len); |
| |
| /* For sanity tests. */ |
| if (btrfs_is_testing(fs_info)) |
| return; |
| |
| if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID && |
| do_list && !(state->state & EXTENT_NORESERVE) && |
| (*bits & EXTENT_CLEAR_DATA_RESV)) |
| btrfs_free_reserved_data_space_noquota( |
| &inode->vfs_inode, |
| state->start, len); |
| |
| percpu_counter_add_batch(&fs_info->delalloc_bytes, -len, |
| fs_info->delalloc_batch); |
| spin_lock(&inode->lock); |
| inode->delalloc_bytes -= len; |
| if (do_list && inode->delalloc_bytes == 0 && |
| test_bit(BTRFS_INODE_IN_DELALLOC_LIST, |
| &inode->runtime_flags)) |
| btrfs_del_delalloc_inode(root, inode); |
| spin_unlock(&inode->lock); |
| } |
| |
| if ((state->state & EXTENT_DELALLOC_NEW) && |
| (*bits & EXTENT_DELALLOC_NEW)) { |
| spin_lock(&inode->lock); |
| ASSERT(inode->new_delalloc_bytes >= len); |
| inode->new_delalloc_bytes -= len; |
| spin_unlock(&inode->lock); |
| } |
| } |
| |
| /* |
| * extent_io.c merge_bio_hook, this must check the chunk tree to make sure |
| * we don't create bios that span stripes or chunks |
| * |
| * return 1 if page cannot be merged to bio |
| * return 0 if page can be merged to bio |
| * return error otherwise |
| */ |
| int btrfs_merge_bio_hook(struct page *page, unsigned long offset, |
| size_t size, struct bio *bio, |
| unsigned long bio_flags) |
| { |
| struct inode *inode = page->mapping->host; |
| struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
| u64 logical = (u64)bio->bi_iter.bi_sector << 9; |
| u64 length = 0; |
| u64 map_length; |
| int ret; |
| |
| if (bio_flags & EXTENT_BIO_COMPRESSED) |
| return 0; |
| |
| length = bio->bi_iter.bi_size; |
| map_length = length; |
| ret = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length, |
| NULL, 0); |
| if (ret < 0) |
| return ret; |
| if (map_length < length + size) |
| return 1; |
| return 0; |
| } |
| |
| /* |
| * in order to insert checksums into the metadata in large chunks, |
| * we wait until bio submission time. All the pages in the bio are |
| * checksummed and sums are attached onto the ordered extent record. |
| * |
| * At IO completion time the cums attached on the ordered extent record |
| * are inserted into the btree |
| */ |
| static blk_status_t __btrfs_submit_bio_start(void *private_data, struct bio *bio, |
| int mirror_num, unsigned long bio_flags, |
| u64 bio_offset) |
| { |
| struct inode *inode = private_data; |
| blk_status_t ret = 0; |
| |
| ret = btrfs_csum_one_bio(inode, bio, 0, 0); |
| BUG_ON(ret); /* -ENOMEM */ |
| return 0; |
| } |
| |
| /* |
| * in order to insert checksums into the metadata in large chunks, |
| * we wait until bio submission time. All the pages in the bio are |
| * checksummed and sums are attached onto the ordered extent record. |
| * |
| * At IO completion time the cums attached on the ordered extent record |
| * are inserted into the btree |
| */ |
| static blk_status_t __btrfs_submit_bio_done(void *private_data, struct bio *bio, |
| int mirror_num, unsigned long bio_flags, |
| u64 bio_offset) |
| { |
| struct inode *inode = private_data; |
| struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
| blk_status_t ret; |
| |
| ret = btrfs_map_bio(fs_info, bio, mirror_num, 1); |
| if (ret) { |
| bio->bi_status = ret; |
| bio_endio(bio); |
| } |
| return ret; |
| } |
| |
| /* |
| * extent_io.c submission hook. This does the right thing for csum calculation |
| * on write, or reading the csums from the tree before a read |
| */ |
| static blk_status_t btrfs_submit_bio_hook(void *private_data, struct bio *bio, |
| int mirror_num, unsigned long bio_flags, |
| u64 bio_offset) |
| { |
| struct inode *inode = private_data; |
| struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA; |
| blk_status_t ret = 0; |
| int skip_sum; |
| int async = !atomic_read(&BTRFS_I(inode)->sync_writers); |
| |
| skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; |
| |
| if (btrfs_is_free_space_inode(BTRFS_I(inode))) |
| metadata = BTRFS_WQ_ENDIO_FREE_SPACE; |
| |
| if (bio_op(bio) != REQ_OP_WRITE) { |
| ret = btrfs_bio_wq_end_io(fs_info, bio, metadata); |
| if (ret) |
| goto out; |
| |
| if (bio_flags & EXTENT_BIO_COMPRESSED) { |
| ret = btrfs_submit_compressed_read(inode, bio, |
| mirror_num, |
| bio_flags); |
| goto out; |
| } else if (!skip_sum) { |
| ret = btrfs_lookup_bio_sums(inode, bio, NULL); |
| if (ret) |
| goto out; |
| } |
| goto mapit; |
| } else if (async && !skip_sum) { |
| /* csum items have already been cloned */ |
| if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) |
| goto mapit; |
| /* we're doing a write, do the async checksumming */ |
| ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, bio_flags, |
| bio_offset, inode, |
| __btrfs_submit_bio_start, |
| __btrfs_submit_bio_done); |
| goto out; |
| } else if (!skip_sum) { |
| ret = btrfs_csum_one_bio(inode, bio, 0, 0); |
| if (ret) |
| goto out; |
| } |
| |
| mapit: |
| ret = btrfs_map_bio(fs_info, bio, mirror_num, 0); |
| |
| out: |
| if (ret) { |
| bio->bi_status = ret; |
| bio_endio(bio); |
| } |
| return ret; |
| } |
| |
| /* |
| * given a list of ordered sums record them in the inode. This happens |
| * at IO completion time based on sums calculated at bio submission time. |
| */ |
| static noinline int add_pending_csums(struct btrfs_trans_handle *trans, |
| struct inode *inode, struct list_head *list) |
| { |
| struct btrfs_ordered_sum *sum; |
| |
| list_for_each_entry(sum, list, list) { |
| trans->adding_csums = 1; |
| btrfs_csum_file_blocks(trans, |
| BTRFS_I(inode)->root->fs_info->csum_root, sum); |
| trans->adding_csums = 0; |
| } |
| return 0; |
| } |
| |
/*
 * Mark the range [start, end] as delalloc in the inode's io tree.
 * Warns if @end is page aligned, since delalloc ranges are expected to
 * end one byte before a page boundary.  @dedupe is currently unused.
 */
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
			      struct extent_state **cached_state, int dedupe)
{
	WARN_ON((end & (PAGE_SIZE - 1)) == 0);
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				   cached_state);
}
| |
/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;	/* page to fix up; a reference is held and dropped by the worker */
	struct btrfs_work work;	/* work item that runs btrfs_writepage_fixup_worker() */
};
| |
/*
 * Deferred fixup for a page that was dirtied without going through the
 * normal delalloc setup (see btrfs_writepage_start_hook): wait out any
 * ordered extent covering it, reserve space and re-mark the range as
 * delalloc so writeback handles it correctly.
 */
static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;
	int ret;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	/* page was truncated, cleaned, or already handled: nothing to do */
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_SIZE - 1;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
			 &cached_state);

	/* already ordered? We're done */
	if (PagePrivate2(page))
		goto out;

	/*
	 * An ordered extent covers the page: drop our locks, wait for it
	 * to finish and retry from the top.
	 */
	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
					PAGE_SIZE);
	if (ordered) {
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
				     page_end, &cached_state, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
					   PAGE_SIZE);
	if (ret) {
		/* reservation failed: report the error through the mapping */
		mapping_set_error(page->mapping, ret);
		end_extent_writepage(page, ret, page_start, page_end);
		ClearPageChecked(page);
		goto out;
	}

	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state,
				  0);
	ClearPageChecked(page);
	set_page_dirty(page);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
			     &cached_state, GFP_NOFS);
out_page:
	unlock_page(page);
	put_page(page);	/* drop the ref taken when the fixup was queued */
	kfree(fixup);
	extent_changeset_free(data_reserved);
}
| |
| /* |
| * There are a few paths in the higher layers of the kernel that directly |
| * set the page dirty bit without asking the filesystem if it is a |
| * good idea. This causes problems because we want to make sure COW |
| * properly happens and the data=ordered rules are followed. |
| * |
| * In our case any range that doesn't have the ORDERED bit set |
| * hasn't been properly setup for IO. We kick off an async process |
| * to fix it up. The async helper will wait for ordered extents, set |
| * the delalloc bit and make it safe to write the page. |
| */ |
| static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) |
| { |
| struct inode *inode = page->mapping->host; |
| struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
| struct btrfs_writepage_fixup *fixup; |
| |
| /* this page is properly in the ordered list */ |
| if (TestClearPagePrivate2(page)) |
| return 0; |
| |
| if (PageChecked(page)) |
| return -EAGAIN; |
| |
| fixup = kzalloc(sizeof(*fixup), GFP_NOFS); |
| if (!fixup) |
| return -EAGAIN; |
| |
| SetPageChecked(page); |
| get_page(page); |
| btrfs_init_work(&fixup->work, btrfs_fixup_helper, |
| btrfs_writepage_fixup_worker, NULL, NULL); |
| fixup->page = page; |
| btrfs_queue_work(fs_info->fixup_workers, &fixup->work); |
| return -EBUSY; |
| } |
| |
/*
 * Insert the file extent item for a completed write into a reserved
 * extent, and hand its space accounting over to the delayed ref head.
 *
 * Returns 0 on success or a negative errno.  On success the qgroup
 * reservation for [file_pos, file_pos + ram_bytes) has been released
 * (its accounting moves along with the delayed ref).
 */
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode, u64 file_pos,
				       u64 disk_bytenr, u64 disk_num_bytes,
				       u64 num_bytes, u64 ram_bytes,
				       u8 compression, u8 encryption,
				       u16 other_encoding, int extent_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 qg_released;
	int extent_inserted = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * we may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * the caller is expected to unpin it and allow it to be merged
	 * with the others.
	 */
	ret = __btrfs_drop_extents(trans, root, inode, path, file_pos,
				   file_pos + num_bytes, NULL, 0,
				   1, sizeof(*fi), &extent_inserted);
	if (ret)
		goto out;

	/* __btrfs_drop_extents() may have already made room for our item. */
	if (!extent_inserted) {
		ins.objectid = btrfs_ino(BTRFS_I(inode));
		ins.offset = file_pos;
		ins.type = BTRFS_EXTENT_DATA_KEY;

		path->leave_spinning = 1;
		ret = btrfs_insert_empty_item(trans, root, path, &ins,
					      sizeof(*fi));
		if (ret)
			goto out;
	}
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
	btrfs_set_file_extent_compression(leaf, fi, compression);
	btrfs_set_file_extent_encryption(leaf, fi, encryption);
	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	inode_add_bytes(inode, num_bytes);

	/* Key for the extent item in the extent tree. */
	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	/*
	 * Release the reserved range from inode dirty range map, as it is
	 * already moved into delayed_ref_head
	 */
	ret = btrfs_qgroup_release_data(inode, file_pos, ram_bytes);
	if (ret < 0)
		goto out;
	/* A non-negative return is the number of released bytes. */
	qg_released = ret;
	ret = btrfs_alloc_reserved_file_extent(trans, root->root_key.objectid,
			btrfs_ino(BTRFS_I(inode)), file_pos, qg_released, &ins);
out:
	btrfs_free_path(path);

	return ret;
}
| |
/* snapshot-aware defrag */

/*
 * One backref to an old (pre-defrag) extent: identifies the file extent
 * item (root, inode, offset) that still references it.
 */
struct sa_defrag_extent_backref {
	struct rb_node node;		/* linked into new_sa_defrag_extent::root */
	struct old_sa_defrag_extent *old;	/* the old extent this backref points at */
	u64 root_id;			/* subvolume tree holding the file extent item */
	u64 inum;			/* inode number within that tree */
	u64 file_pos;			/* key.offset of the file extent item */
	u64 extent_offset;		/* item's offset into the old disk extent */
	u64 num_bytes;			/* length of the referencing file extent */
	u64 generation;			/* generation of the file extent item */
};

/* One pre-defrag extent overlapping the defragged range. */
struct old_sa_defrag_extent {
	struct list_head list;		/* linked on new_sa_defrag_extent::head */
	struct new_sa_defrag_extent *new;	/* owning descriptor */

	u64 extent_offset;		/* file extent item's offset field */
	u64 bytenr;			/* disk bytenr of the old extent */
	u64 offset;			/* start of the overlap, relative to key.offset */
	u64 len;			/* length of the overlap */
	int count;			/* number of backrefs recorded for this extent */
};

/*
 * State for one snapshot-aware defrag operation: the new (defragged)
 * extent plus every old extent it replaces.
 */
struct new_sa_defrag_extent {
	struct rb_root root;		/* backrefs, sorted by backref_comp() */
	struct list_head head;		/* list of old_sa_defrag_extent */
	struct btrfs_path *path;	/* scratch path shared by the backref walk */
	struct inode *inode;		/* inode being defragged */
	u64 file_pos;			/* file offset of the defragged range */
	u64 len;			/* logical length of the range */
	u64 bytenr;			/* disk bytenr of the new extent */
	u64 disk_len;			/* on-disk length of the new extent */
	u8 compress_type;		/* compression type of the new extent */
};
| |
| static int backref_comp(struct sa_defrag_extent_backref *b1, |
| struct sa_defrag_extent_backref *b2) |
| { |
| if (b1->root_id < b2->root_id) |
| return -1; |
| else if (b1->root_id > b2->root_id) |
| return 1; |
| |
| if (b1->inum < b2->inum) |
| return -1; |
| else if (b1->inum > b2->inum) |
| return 1; |
| |
| if (b1->file_pos < b2->file_pos) |
| return -1; |
| else if (b1->file_pos > b2->file_pos) |
| return 1; |
| |
| /* |
| * [------------------------------] ===> (a range of space) |
| * |<--->| |<---->| =============> (fs/file tree A) |
| * |<---------------------------->| ===> (fs/file tree B) |
| * |
| * A range of space can refer to two file extents in one tree while |
| * refer to only one file extent in another tree. |
| * |
| * So we may process a disk offset more than one time(two extents in A) |
| * and locate at the same extent(one extent in B), then insert two same |
| * backrefs(both refer to the extent in B). |
| */ |
| return 0; |
| } |
| |
| static void backref_insert(struct rb_root *root, |
| struct sa_defrag_extent_backref *backref) |
| { |
| struct rb_node **p = &root->rb_node; |
| struct rb_node *parent = NULL; |
| struct sa_defrag_extent_backref *entry; |
| int ret; |
| |
| while (*p) { |
| parent = *p; |
| entry = rb_entry(parent, struct sa_defrag_extent_backref, node); |
| |
| ret = backref_comp(backref, entry); |
| if (ret < 0) |
| p = &(*p)->rb_left; |
| else |
| p = &(*p)->rb_right; |
| } |
| |
| rb_link_node(&backref->node, parent, p); |
| rb_insert_color(&backref->node, root); |
| } |
| |
| /* |
| * Note the backref might has changed, and in this case we just return 0. |
| */ |
| static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id, |
| void *ctx) |
| { |
| struct btrfs_file_extent_item *extent; |
| struct old_sa_defrag_extent *old = ctx; |
| struct new_sa_defrag_extent *new = old->new; |
| struct btrfs_path *path = new->path; |
| struct btrfs_key key; |
| struct btrfs_root *root; |
| struct sa_defrag_extent_backref *backref; |
| struct extent_buffer *leaf; |
| struct inode *inode = new->inode; |
| struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
| int slot; |
| int ret; |
| u64 extent_offset; |
| u64 num_bytes; |
| |
| if (BTRFS_I(inode)->root->root_key.objectid == root_id && |
| inum == btrfs_ino(BTRFS_I(inode))) |
| return 0; |
| |
| key.objectid = root_id; |
| key.type = BTRFS_ROOT_ITEM_KEY; |
| key.offset = (u64)-1; |
| |
| root = btrfs_read_fs_root_no_name(fs_info, &key); |
| if (IS_ERR(root)) { |
| if (PTR_ERR(root) == -ENOENT) |
| return 0; |
| WARN_ON(1); |
| btrfs_debug(fs_info, "inum=%llu, offset=%llu, root_id=%llu", |
| inum, offset, root_id); |
| return PTR_ERR(root); |
| } |
| |
| key.objectid = inum; |
| key.type = BTRFS_EXTENT_DATA_KEY; |
| if (offset > (u64)-1 << 32) |
| key.offset = 0; |
| else |
| key.offset = offset; |
| |
| ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
| if (WARN_ON(ret < 0)) |
| return ret; |
| ret = 0; |
| |
| while (1) { |
| cond_resched(); |
| |
| leaf = path->nodes[0]; |
| slot = path->slots[0]; |
| |
| if (slot >= btrfs_header_nritems(leaf)) { |
| ret = btrfs_next_leaf(root, path); |
| if (ret < 0) { |
| goto out; |
| } else if (ret > 0) { |
| ret = 0; |
| goto out; |
| } |
| continue; |
| } |
| |
| path->slots[0]++; |
| |
| btrfs_item_key_to_cpu(leaf, &key, slot); |
| |
| if (key.objectid > inum) |
| goto out; |
| |
| if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY) |
| continue; |
| |
| extent = btrfs_item_ptr(leaf, slot, |
| struct btrfs_file_extent_item); |
| |
| if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr) |
| continue; |
| |
| /* |
| * 'offset' refers to the exact key.offset, |
| * NOT the 'offset' field in btrfs_extent_data_ref, ie. |
| * (key.offset - extent_offset). |
| */ |
| if (key.offset != offset) |
| continue; |
| |
| extent_offset = btrfs_file_extent_offset(leaf, extent); |
| num_bytes = btrfs_file_extent_num_bytes(leaf, extent); |
| |
| if (extent_offset >= old->extent_offset + old->offset + |
| old->len || extent_offset + num_bytes <= |
| old->extent_offset + old->offset) |
| continue; |
| break; |
| } |
| |
| backref = kmalloc(sizeof(*backref), GFP_NOFS); |
| if (!backref) { |
| ret = -ENOENT; |
| goto out; |
| } |
| |
| backref->root_id = root_id; |
| backref->inum = inum; |
| backref->file_pos = offset; |
| backref->num_bytes = num_bytes; |
| backref->extent_offset = extent_offset; |
| backref->generation = btrfs_file_extent_generation(leaf, extent); |
| backref->old = old; |
| backref_insert(&new->root, backref); |
| old->count++; |
| out: |
| btrfs_release_path(path); |
| WARN_ON(ret); |
| return ret; |
| } |
| |
/*
 * For every old extent recorded on @new, collect all backrefs referencing
 * it into new->root (an rb-tree ordered by backref_comp()).  Old extents
 * that turn out to have no backrefs are dropped from the list.
 *
 * Returns true when at least one old extent with backrefs remains, false
 * on error or when there is nothing left to relink.  The caller owns
 * @new in all cases and frees it via free_sa_defrag_extent().
 */
static noinline bool record_extent_backrefs(struct btrfs_path *path,
				   struct new_sa_defrag_extent *new)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
	struct old_sa_defrag_extent *old, *tmp;
	int ret;

	new->path = path;

	list_for_each_entry_safe(old, tmp, &new->head, list) {
		ret = iterate_inodes_from_logical(old->bytenr +
						  old->extent_offset, fs_info,
						  path, record_one_backref,
						  old);
		/* -ENOENT also covers record_one_backref() skip cases. */
		if (ret < 0 && ret != -ENOENT)
			return false;

		/* no backref to be processed for this extent */
		if (!old->count) {
			list_del(&old->list);
			kfree(old);
		}
	}

	if (list_empty(&new->head))
		return false;

	return true;
}
| |
| static int relink_is_mergable(struct extent_buffer *leaf, |
| struct btrfs_file_extent_item *fi, |
| struct new_sa_defrag_extent *new) |
| { |
| if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr) |
| return 0; |
| |
| if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG) |
| return 0; |
| |
| if (btrfs_file_extent_compression(leaf, fi) != new->compress_type) |
| return 0; |
| |
| if (btrfs_file_extent_encryption(leaf, fi) || |
| btrfs_file_extent_other_encoding(leaf, fi)) |
| return 0; |
| |
| return 1; |
| } |
| |
| /* |
| * Note the backref might has changed, and in this case we just return 0. |
| */ |
| static noinline int relink_extent_backref(struct btrfs_path *path, |
| struct sa_defrag_extent_backref *prev, |
| struct sa_defrag_extent_backref *backref) |
| { |
| struct btrfs_file_extent_item *extent; |
| struct btrfs_file_extent_item *item; |
| struct btrfs_ordered_extent *ordered; |
| struct btrfs_trans_handle *trans; |
| struct btrfs_root *root; |
| struct btrfs_key key; |
| struct extent_buffer *leaf; |
| struct old_sa_defrag_extent *old = backref->old; |
| struct new_sa_defrag_extent *new = old->new; |
| struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb); |
| struct inode *inode; |
| struct extent_state *cached = NULL; |
| int ret = 0; |
| u64 start; |
| u64 len; |
| u64 lock_start; |
| u64 lock_end; |
| bool merge = false; |
| int index; |
| |
| if (prev && prev->root_id == backref->root_id && |
| prev->inum == backref->inum && |
| prev->file_pos + prev->num_bytes == backref->file_pos) |
| merge = true; |
| |
| /* step 1: get root */ |
| key.objectid = backref->root_id; |
| key.type = BTRFS_ROOT_ITEM_KEY; |
| key.offset = (u64)-1; |
| |
| index = srcu_read_lock(&fs_info->subvol_srcu); |
| |
| root = btrfs_read_fs_root_no_name(fs_info, &key); |
| if (IS_ERR(root)) { |
| srcu_read_unlock(&fs_info->subvol_srcu, index); |
| if (PTR_ERR(root) == -ENOENT) |
| return 0; |
| return PTR_ERR(root); |
| } |
| |
| if (btrfs_root_readonly(root)) { |
| srcu_read_unlock(&fs_info->subvol_srcu, index); |
| return 0; |
| } |
| |
| /* step 2: get inode */ |
| key.objectid = backref->inum; |
| key.type = BTRFS_INODE_ITEM_KEY; |
| key.offset = 0; |
| |
| inode = btrfs_iget(fs_info->sb, &key, root, NULL); |
| if (IS_ERR(inode)) { |
| srcu_read_unlock(&fs_info->subvol_srcu, index); |
| return 0; |
| } |
| |
| srcu_read_unlock(&fs_info->subvol_srcu, index); |
| |
| /* step 3: relink backref */ |
| lock_start = backref->file_pos; |
| lock_end = backref->file_pos + backref->num_bytes - 1; |
| lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end, |
| &cached); |
| |
| ordered = btrfs_lookup_first_ordered_extent(inode, lock_end); |
| if (ordered) { |
| btrfs_put_ordered_extent(ordered); |
| goto out_unlock; |
| } |
| |
| trans = btrfs_join_transaction(root); |
| if (IS_ERR(trans)) { |
| ret = PTR_ERR(trans); |
| goto out_unlock; |
| } |
| |
| key.objectid = backref->inum; |
| key.type = BTRFS_EXTENT_DATA_KEY; |
| key.offset = backref->file_pos; |
| |
| ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
| if (ret < 0) { |
| goto out_free_path; |
| } else if (ret > 0) { |
| ret = 0; |
| goto out_free_path; |
| } |
| |
| extent = btrfs_item_ptr(path->nodes[0], path->slots[0], |
| struct btrfs_file_extent_item); |
| |
| if (btrfs_file_extent_generation(path->nodes[0], extent) != |
| backref->generation) |
| goto out_free_path; |
| |
| btrfs_release_path(path); |
| |
| start = backref->file_pos; |
| if (backref->extent_offset < old->extent_offset + old->offset) |
| start += old->extent_offset + old->offset - |
| backref->extent_offset; |
| |
| len = min(backref->extent_offset + backref->num_bytes, |
| old->extent_offset + old->offset + old->len); |
| len -= max(backref->extent_offset, old->extent_offset + old->offset); |
| |
| ret = btrfs_drop_extents(trans, root, inode, start, |
| start + len, 1); |
| if (ret) |
| goto out_free_path; |
| again: |
| key.objectid = btrfs_ino(BTRFS_I(inode)); |
| key.type = BTRFS_EXTENT_DATA_KEY; |
| key.offset = start; |
| |
| path->leave_spinning = 1; |
| if (merge) { |
| struct btrfs_file_extent_item *fi; |
| u64 extent_len; |
| struct btrfs_key found_key; |
| |
| ret = btrfs_search_slot(trans, root, &key, path, 0, 1); |
| if (ret < 0) |
| goto out_free_path; |
| |
| path->slots[0]--; |
| leaf = path->nodes[0]; |
| btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
| |
| fi = btrfs_item_ptr(leaf, path->slots[0], |
| struct btrfs_file_extent_item); |
| extent_len = btrfs_file_extent_num_bytes(leaf, fi); |
| |
| if (extent_len + found_key.offset == start && |
| relink_is_mergable(leaf, fi, new)) { |
| btrfs_set_file_extent_num_bytes(leaf, fi, |
| extent_len + len); |
| btrfs_mark_buffer_dirty(leaf); |
| inode_add_bytes(inode, len); |
| |
| ret = 1; |
| goto out_free_path; |
| } else { |
| merge = false; |
| btrfs_release_path(path); |
| goto again; |
| } |
| } |
| |
| ret = btrfs_insert_empty_item(trans, root, path, &key, |
| sizeof(*extent)); |
| if (ret) { |
| btrfs_abort_transaction(trans, ret); |
| goto out_free_path; |
| } |
| |
| leaf = path->nodes[0]; |
| item = btrfs_item_ptr(leaf, path->slots[0], |
| struct btrfs_file_extent_item); |
| btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr); |
| btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len); |
| btrfs_set_file_extent_offset(leaf, item, start - new->file_pos); |
| btrfs_set_file_extent_num_bytes(leaf, item, len); |
| btrfs_set_file_extent_ram_bytes(leaf, item, new->len); |
| btrfs_set_file_extent_generation(leaf, item, trans->transid); |
| btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG); |
| btrfs_set_file_extent_compression(leaf, item, new->compress_type); |
| btrfs_set_file_extent_encryption(leaf, item, 0); |
| btrfs_set_file_extent_other_encoding(leaf, item, 0); |
| |
| btrfs_mark_buffer_dirty(leaf); |
| inode_add_bytes(inode, len); |
| btrfs_release_path(path); |
| |
| ret = btrfs_inc_extent_ref(trans, fs_info, new->bytenr, |
| new->disk_len, 0, |
| backref->root_id, backref->inum, |
| new->file_pos); /* start - extent_offset */ |
| if (ret) { |
| btrfs_abort_transaction(trans, ret); |
| goto out_free_path; |
| } |
| |
| ret = 1; |
| out_free_path: |
| btrfs_release_path(path); |
| path->leave_spinning = 0; |
| btrfs_end_transaction(trans); |
| out_unlock: |
| unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end, |
| &cached, GFP_NOFS); |
| iput(inode); |
| return ret; |
| } |
| |
| static void free_sa_defrag_extent(struct new_sa_defrag_extent *new) |
| { |
| struct old_sa_defrag_extent *old, *tmp; |
| |
| if (!new) |
| return; |
| |
| list_for_each_entry_safe(old, tmp, &new->head, list) { |
| kfree(old); |
| } |
| kfree(new); |
| } |
| |
| static void relink_file_extents(struct new_sa_defrag_extent *new) |
| { |
| struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb); |
| struct btrfs_path *path; |
| struct sa_defrag_extent_backref *backref; |
| struct sa_defrag_extent_backref *prev = NULL; |
| struct inode *inode; |
| struct btrfs_root *root; |
| struct rb_node *node; |
| int ret; |
| |
| inode = new->inode; |
| root = BTRFS_I(inode)->root; |
| |
| path = btrfs_alloc_path(); |
| if (!path) |
| return; |
| |
| if (!record_extent_backrefs(path, new)) { |
| btrfs_free_path(path); |
| goto out; |
| } |
| btrfs_release_path(path); |
| |
| while (1) { |
| node = rb_first(&new->root); |
| if (!node) |
| break; |
| rb_erase(node, &new->root); |
| |
| backref = rb_entry(node, struct sa_defrag_extent_backref, node); |
| |
| ret = relink_extent_backref(path, prev, backref); |
| WARN_ON(ret < 0); |
| |
| kfree(prev); |
| |
| if (ret == 1) |
| prev = backref; |
| else |
| prev = NULL; |
| cond_resched(); |
| } |
| kfree(prev); |
| |
| btrfs_free_path(path); |
| out: |
| free_sa_defrag_extent(new); |
| |
| atomic_dec(&fs_info->defrag_running); |
| wake_up(&fs_info->transaction_wait); |
| } |
| |
/*
 * Record all the old file extents overlapping a just-completed,
 * defrag-flagged ordered extent, so relink_file_extents() can later
 * repoint their backrefs at the new extent.
 *
 * Returns the filled-in descriptor, or NULL on failure.  On success
 * fs_info->defrag_running has been elevated; relink_file_extents()
 * drops it again.
 */
static struct new_sa_defrag_extent *
record_old_file_extents(struct inode *inode,
			struct btrfs_ordered_extent *ordered)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct old_sa_defrag_extent *old;
	struct new_sa_defrag_extent *new;
	int ret;

	new = kmalloc(sizeof(*new), GFP_NOFS);
	if (!new)
		return NULL;

	/* Describe the new extent from the completed ordered extent. */
	new->inode = inode;
	new->file_pos = ordered->file_offset;
	new->len = ordered->len;
	new->bytenr = ordered->start;
	new->disk_len = ordered->disk_len;
	new->compress_type = ordered->compress_type;
	new->root = RB_ROOT;
	INIT_LIST_HEAD(&new->head);

	path = btrfs_alloc_path();
	if (!path)
		goto out_kfree;

	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = new->file_pos;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out_free_path;
	/* Not an exact hit: step back so an overlapping extent is seen. */
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	/* find out all the old extents for the file range */
	while (1) {
		struct btrfs_file_extent_item *extent;
		struct extent_buffer *l;
		int slot;
		u64 num_bytes;
		u64 offset;
		u64 end;
		u64 disk_bytenr;
		u64 extent_offset;

		l = path->nodes[0];
		slot = path->slots[0];

		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out_free_path;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(l, &key, slot);

		/* Stop once past this inode's extents or past the range. */
		if (key.objectid != btrfs_ino(BTRFS_I(inode)))
			break;
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			break;
		if (key.offset >= new->file_pos + new->len)
			break;

		extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);

		num_bytes = btrfs_file_extent_num_bytes(l, extent);
		if (key.offset + num_bytes < new->file_pos)
			goto next;

		/* Skip holes: a zero disk_bytenr has no allocated extent. */
		disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
		if (!disk_bytenr)
			goto next;

		extent_offset = btrfs_file_extent_offset(l, extent);

		old = kmalloc(sizeof(*old), GFP_NOFS);
		if (!old)
			goto out_free_path;

		/* Record only the part overlapping the new extent. */
		offset = max(new->file_pos, key.offset);
		end = min(new->file_pos + new->len, key.offset + num_bytes);

		old->bytenr = disk_bytenr;
		old->extent_offset = extent_offset;
		old->offset = offset - key.offset;
		old->len = end - offset;
		old->new = new;
		old->count = 0;
		list_add_tail(&old->list, &new->head);
next:
		path->slots[0]++;
		cond_resched();
	}

	btrfs_free_path(path);
	atomic_inc(&fs_info->defrag_running);

	return new;

out_free_path:
	btrfs_free_path(path);
out_kfree:
	free_sa_defrag_extent(new);
	return NULL;
}
| |
| static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info, |
| u64 start, u64 len) |
| { |
| struct btrfs_block_group_cache *cache; |
| |
| cache = btrfs_lookup_block_group(fs_info, start); |
| ASSERT(cache); |
| |
| spin_lock(&cache->lock); |
| cache->delalloc_bytes -= len; |
| spin_unlock(&cache->lock); |
| |
| btrfs_put_block_group(cache); |
| } |
| |
| /* as ordered data IO finishes, this gets called so we can finish |
| * an ordered extent if the range of bytes in the file it covers are |
| * fully written. |
| */ |
| static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) |
| { |
| struct inode *inode = ordered_extent->inode; |
| struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| struct btrfs_trans_handle *trans = NULL; |
| struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
| struct extent_state *cached_state = NULL; |
| struct new_sa_defrag_extent *new = NULL; |
| int compress_type = 0; |
| int ret = 0; |
| u64 logical_len = ordered_extent->len; |
| bool nolock; |
| bool truncated = false; |
| bool range_locked = false; |
| bool clear_new_delalloc_bytes = false; |
| |
| if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && |
| !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) && |
| !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags)) |
| clear_new_delalloc_bytes = true; |
| |
| nolock = btrfs_is_free_space_inode(BTRFS_I(inode)); |
| |
| if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { |
| ret = -EIO; |
| goto out; |
| } |
| |
| btrfs_free_io_failure_record(BTRFS_I(inode), |
| ordered_extent->file_offset, |
| ordered_extent->file_offset + |
| ordered_extent->len - 1); |
| |
| if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { |
| truncated = true; |
| logical_len = ordered_extent->truncated_len; |
| /* Truncated the entire extent, don't bother adding */ |
| if (!logical_len) |
| goto out; |
| } |
| |
| if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { |
| BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */ |
| |
| /* |
| * For mwrite(mmap + memset to write) case, we still reserve |
| * space for NOCOW range. |
| * As NOCOW won't cause a new delayed ref, just free the space |
| */ |
| btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset, |
| ordered_extent->len); |
| btrfs_ordered_update_i_size(inode, 0, ordered_extent); |
| if (nolock) |
| trans = btrfs_join_transaction_nolock(root); |
| else |
| trans = btrfs_join_transaction(root); |
| if (IS_ERR(trans)) { |
| ret = PTR_ERR(trans); |
| trans = NULL; |
| goto out; |
| } |
| trans->block_rsv = &fs_info->delalloc_block_rsv; |
| ret = btrfs_update_inode_fallback(trans, root, inode); |
| if (ret) /* -ENOMEM or corruption */ |
| btrfs_abort_transaction(trans, ret); |
| goto out; |
| } |
| |
| range_locked = true; |
| lock_extent_bits(io_tree, ordered_extent->file_offset, |
| ordered_extent->file_offset + ordered_extent->len - 1, |
| &cached_state); |
| |
| ret = test_range_bit(io_tree, ordered_extent->file_offset, |
| ordered_extent->file_offset + ordered_extent->len - 1, |
| EXTENT_DEFRAG, 0, cached_state); |
| if (ret) { |
| u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item); |
| if (0 && last_snapshot >= BTRFS_I(inode)->generation) |
| /* the inode is shared */ |
| new = record_old_file_extents(inode, ordered_extent); |
| |
| clear_extent_bit(io_tree, ordered_extent->file_offset, |
| ordered_extent->file_offset + ordered_extent->len - 1, |
| EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS); |
| } |
| |
| if (nolock) |
| trans = btrfs_join_transaction_nolock(root); |
| else |
| trans = btrfs_join_transaction(root); |
| if (IS_ERR(trans)) { |
| ret = PTR_ERR(trans); |
| trans = NULL; |
| goto out; |
| } |
| |
| trans->block_rsv = &fs_info->delalloc_block_rsv; |
| |
| if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) |
| compress_type = ordered_extent->compress_type; |
| if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { |
| BUG_ON(compress_type); |
| ret = btrfs_mark_extent_written(trans, BTRFS_I(inode), |
| ordered_extent->file_offset, |
| ordered_extent->file_offset + |
| logical_len); |
| } else { |
| BUG_ON(root == fs_info->tree_root); |
| ret = insert_reserved_file_extent(trans, inode, |
| ordered_extent->file_offset, |
| ordered_extent->start, |
| ordered_extent->disk_len, |
| logical_len, logical_len, |
| compress_type, 0, 0, |
| BTRFS_FILE_EXTENT_REG); |
| if (!ret) |
| btrfs_release_delalloc_bytes(fs_info, |
| ordered_extent->start, |
| ordered_extent->disk_len); |
| } |
| unpin_extent_cache(&BTRFS_I(inode)->extent_tree, |
| ordered_extent->file_offset, ordered_extent->len, |
| trans->transid); |
| if (ret < 0) { |
| btrfs_abort_transaction(trans, ret); |
| goto out; |
| } |
| |
| add_pending_csums(trans, inode, &ordered_extent->list); |
| |
| btrfs_ordered_update_i_size(inode, 0, ordered_extent); |
| ret = btrfs_update_inode_fallback(trans, root, inode); |
| if (ret) { /* -ENOMEM or corruption */ |
| btrfs_abort_transaction(trans, ret); |
| goto out; |
| } |
| ret = 0; |
| out: |
| if (range_locked || clear_new_delalloc_bytes) { |
| unsigned int clear_bits = 0; |
| |
| if (range_locked) |
| clear_bits |= EXTENT_LOCKED; |
| if (clear_new_delalloc_bytes) |
| clear_bits |= EXTENT_DELALLOC_NEW; |
| clear_extent_bit(&BTRFS_I(inode)->io_tree, |
| ordered_extent->file_offset, |
| ordered_extent->file_offset + |
| ordered_extent->len - 1, |
| clear_bits, |
| (clear_bits & EXTENT_LOCKED) ? 1 : 0, |
| 0, &cached_state, GFP_NOFS); |
| } |
| |
| if (root != fs_info->tree_root) |
| btrfs_delalloc_release_metadata(BTRFS_I(inode), |
| ordered_extent->len); |
| if |