| /* |
| * Copyright (C) 2007 Oracle. All rights reserved. |
| * |
| * This program is free software; you can redistribute it and/or |
| * modify it under the terms of the GNU General Public |
| * License v2 as published by the Free Software Foundation. |
| * |
| * This program is distributed in the hope that it will be useful, |
| * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| * General Public License for more details. |
| * |
| * You should have received a copy of the GNU General Public |
| * License along with this program; if not, write to the |
| * Free Software Foundation, Inc., 59 Temple Place - Suite 330, |
| * Boston, MA 02111-1307, USA. |
| */ |
| |
| #include <linux/kernel.h> |
| #include <linux/bio.h> |
| #include <linux/buffer_head.h> |
| #include <linux/file.h> |
| #include <linux/fs.h> |
| #include <linux/pagemap.h> |
| #include <linux/highmem.h> |
| #include <linux/time.h> |
| #include <linux/init.h> |
| #include <linux/string.h> |
| #include <linux/backing-dev.h> |
| #include <linux/mpage.h> |
| #include <linux/swap.h> |
| #include <linux/writeback.h> |
| #include <linux/statfs.h> |
| #include <linux/compat.h> |
| #include <linux/bit_spinlock.h> |
| #include <linux/xattr.h> |
| #include <linux/posix_acl.h> |
| #include <linux/falloc.h> |
| #include <linux/slab.h> |
| #include <linux/ratelimit.h> |
| #include <linux/mount.h> |
| #include <linux/btrfs.h> |
| #include <linux/blkdev.h> |
| #include <linux/posix_acl_xattr.h> |
| #include <linux/uio.h> |
| #include "ctree.h" |
| #include "disk-io.h" |
| #include "transaction.h" |
| #include "btrfs_inode.h" |
| #include "print-tree.h" |
| #include "ordered-data.h" |
| #include "xattr.h" |
| #include "tree-log.h" |
| #include "volumes.h" |
| #include "compression.h" |
| #include "locking.h" |
| #include "free-space-cache.h" |
| #include "inode-map.h" |
| #include "backref.h" |
| #include "hash.h" |
| #include "props.h" |
| #include "qgroup.h" |
| |
| struct btrfs_iget_args { |
| struct btrfs_key *location; |
| struct btrfs_root *root; |
| }; |
| |
| static const struct inode_operations btrfs_dir_inode_operations; |
| static const struct inode_operations btrfs_symlink_inode_operations; |
| static const struct inode_operations btrfs_dir_ro_inode_operations; |
| static const struct inode_operations btrfs_special_inode_operations; |
| static const struct inode_operations btrfs_file_inode_operations; |
| static const struct address_space_operations btrfs_aops; |
| static const struct address_space_operations btrfs_symlink_aops; |
| static const struct file_operations btrfs_dir_file_operations; |
| static struct extent_io_ops btrfs_extent_io_ops; |
| |
| static struct kmem_cache *btrfs_inode_cachep; |
| static struct kmem_cache *btrfs_delalloc_work_cachep; |
| struct kmem_cache *btrfs_trans_handle_cachep; |
| struct kmem_cache *btrfs_transaction_cachep; |
| struct kmem_cache *btrfs_path_cachep; |
| struct kmem_cache *btrfs_free_space_cachep; |
| |
| #define S_SHIFT 12 |
| static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = { |
| [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE, |
| [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR, |
| [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV, |
| [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV, |
| [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO, |
| [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK, |
| [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK, |
| }; |
| |
| static int btrfs_setsize(struct inode *inode, struct iattr *attr); |
| static int btrfs_truncate(struct inode *inode); |
| static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent); |
| static noinline int cow_file_range(struct inode *inode, |
| struct page *locked_page, |
| u64 start, u64 end, int *page_started, |
| unsigned long *nr_written, int unlock); |
| static struct extent_map *create_pinned_em(struct inode *inode, u64 start, |
| u64 len, u64 orig_start, |
| u64 block_start, u64 block_len, |
| u64 orig_block_len, u64 ram_bytes, |
| int type); |
| |
| static int btrfs_dirty_inode(struct inode *inode); |
| |
| #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS |
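| /* |
| * Used by the sanity tests to point a test inode's io_tree at the |
| * regular data extent_io ops. |
| */ |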
| void btrfs_test_inode_set_ops(struct inode *inode) |
| { |
| BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; |
| } |
| #endif |
| |
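| /* |
| * Set up the security related xattrs for a newly created inode inside |
| * the given transaction: initialize the ACLs (based on the parent |
| * directory) and then the security xattrs for @qstr. |
| */ |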
| static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, |
| struct inode *inode, struct inode *dir, |
| const struct qstr *qstr) |
| { |
| int err; |
| |
| err = btrfs_init_acl(trans, inode, dir); |
| if (!err) |
| err = btrfs_xattr_security_init(trans, inode, dir, qstr); |
| return err; |
| } |
| |
| /* |
| * this does all the hard work for inserting an inline extent into |
| * the btree. The caller should have done a btrfs_drop_extents so that |
| * no overlapping inline items exist in the btree |
| */ |
| static int insert_inline_extent(struct btrfs_trans_handle *trans, |
| struct btrfs_path *path, int extent_inserted, |
| struct btrfs_root *root, struct inode *inode, |
| u64 start, size_t size, size_t compressed_size, |
| int compress_type, |
| struct page **compressed_pages) |
| { |
| struct extent_buffer *leaf; |
| struct page *page = NULL; |
| char *kaddr; |
| unsigned long ptr; |
| struct btrfs_file_extent_item *ei; |
| int err = 0; |
| int ret; |
| size_t cur_size = size; |
| unsigned long offset; |
| |
| if (compressed_size && compressed_pages) |
| cur_size = compressed_size; |
| |
| inode_add_bytes(inode, size); |
| |
| if (!extent_inserted) { |
| struct btrfs_key key; |
| size_t datasize; |
| |
| key.objectid = btrfs_ino(inode); |
| key.offset = start; |
| key.type = BTRFS_EXTENT_DATA_KEY; |
| |
| datasize = btrfs_file_extent_calc_inline_size(cur_size); |
| path->leave_spinning = 1; |
| ret = btrfs_insert_empty_item(trans, root, path, &key, |
| datasize); |
| if (ret) { |
| err = ret; |
| goto fail; |
| } |
| } |
| leaf = path->nodes[0]; |
| ei = btrfs_item_ptr(leaf, path->slots[0], |
| struct btrfs_file_extent_item); |
| btrfs_set_file_extent_generation(leaf, ei, trans->transid); |
| btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE); |
| btrfs_set_file_extent_encryption(leaf, ei, 0); |
| btrfs_set_file_extent_other_encoding(leaf, ei, 0); |
| btrfs_set_file_extent_ram_bytes(leaf, ei, size); |
| ptr = btrfs_file_extent_inline_start(ei); |
| |
| if (compress_type != BTRFS_COMPRESS_NONE) { |
| struct page *cpage; |
| int i = 0; |
| while (compressed_size > 0) { |
| cpage = compressed_pages[i]; |
| cur_size = min_t(unsigned long, compressed_size, |
| PAGE_CACHE_SIZE); |
| |
| kaddr = kmap_atomic(cpage); |
| write_extent_buffer(leaf, kaddr, ptr, cur_size); |
| kunmap_atomic(kaddr); |
| |
| i++; |
| ptr += cur_size; |
| compressed_size -= cur_size; |
| } |
| btrfs_set_file_extent_compression(leaf, ei, |
| compress_type); |
| } else { |
| page = find_get_page(inode->i_mapping, |
| start >> PAGE_CACHE_SHIFT); |
| btrfs_set_file_extent_compression(leaf, ei, 0); |
| kaddr = kmap_atomic(page); |
| offset = start & (PAGE_CACHE_SIZE - 1); |
| write_extent_buffer(leaf, kaddr + offset, ptr, size); |
| kunmap_atomic(kaddr); |
| page_cache_release(page); |
| } |
| btrfs_mark_buffer_dirty(leaf); |
| btrfs_release_path(path); |
| |
| /* |
| * we're an inline extent, so nobody can |
| * extend the file past i_size without locking |
| * a page we already have locked. |
| * |
| * We must do any isize and inode updates |
| * before we unlock the pages. Otherwise we |
| * could end up racing with unlink. |
| */ |
| BTRFS_I(inode)->disk_i_size = inode->i_size; |
| ret = btrfs_update_inode(trans, root, inode); |
| |
| return ret; |
| fail: |
| return err; |
| } |
| |
| |
| /* |
| * conditionally insert an inline extent into the file. This |
| * does the checks required to make sure the data is small enough |
| * to fit as an inline extent. |
| */ |
| static noinline int cow_file_range_inline(struct btrfs_root *root, |
| struct inode *inode, u64 start, |
| u64 end, size_t compressed_size, |
| int compress_type, |
| struct page **compressed_pages) |
| { |
| struct btrfs_trans_handle *trans; |
| u64 isize = i_size_read(inode); |
| u64 actual_end = min(end + 1, isize); |
| u64 inline_len = actual_end - start; |
| u64 aligned_end = ALIGN(end, root->sectorsize); |
| u64 data_len = inline_len; |
| int ret; |
| struct btrfs_path *path; |
| int extent_inserted = 0; |
| u32 extent_item_size; |
| |
| if (compressed_size) |
| data_len = compressed_size; |
| |
| if (start > 0 || |
| actual_end > PAGE_CACHE_SIZE || |
| data_len > BTRFS_MAX_INLINE_DATA_SIZE(root) || |
| (!compressed_size && |
| (actual_end & (root->sectorsize - 1)) == 0) || |
| end + 1 < isize || |
| data_len > root->fs_info->max_inline) { |
| return 1; |
| } |
| |
| path = btrfs_alloc_path(); |
| if (!path) |
| return -ENOMEM; |
| |
| trans = btrfs_join_transaction(root); |
| if (IS_ERR(trans)) { |
| btrfs_free_path(path); |
| return PTR_ERR(trans); |
| } |
| trans->block_rsv = &root->fs_info->delalloc_block_rsv; |
| |
| if (compressed_size && compressed_pages) |
| extent_item_size = btrfs_file_extent_calc_inline_size( |
| compressed_size); |
| else |
| extent_item_size = btrfs_file_extent_calc_inline_size( |
| inline_len); |
| |
| ret = __btrfs_drop_extents(trans, root, inode, path, |
| start, aligned_end, NULL, |
| 1, 1, extent_item_size, &extent_inserted); |
| if (ret) { |
| btrfs_abort_transaction(trans, root, ret); |
| goto out; |
| } |
| |
| if (isize > actual_end) |
| inline_len = min_t(u64, isize, actual_end); |
| ret = insert_inline_extent(trans, path, extent_inserted, |
| root, inode, start, |
| inline_len, compressed_size, |
| compress_type, compressed_pages); |
| if (ret && ret != -ENOSPC) { |
| btrfs_abort_transaction(trans, root, ret); |
| goto out; |
| } else if (ret == -ENOSPC) { |
| ret = 1; |
| goto out; |
| } |
| |
| set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); |
| btrfs_delalloc_release_metadata(inode, end + 1 - start); |
| btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0); |
| out: |
| /* |
| * Don't forget to free the reserved space; an inlined extent is not |
| * counted as a data extent, so free the reservation directly here. |
| * At reserve time the space is always aligned to the page size, so |
| * just free one page here. |
| */ |
| btrfs_qgroup_free_data(inode, 0, PAGE_CACHE_SIZE); |
| btrfs_free_path(path); |
| btrfs_end_transaction(trans, root); |
| return ret; |
| } |
| |
| struct async_extent { |
| u64 start; |
| u64 ram_size; |
| u64 compressed_size; |
| struct page **pages; |
| unsigned long nr_pages; |
| int compress_type; |
| struct list_head list; |
| }; |
| |
| struct async_cow { |
| struct inode *inode; |
| struct btrfs_root *root; |
| struct page *locked_page; |
| u64 start; |
| u64 end; |
| struct list_head extents; |
| struct btrfs_work work; |
| }; |
| |
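| /* |
| * Queue one (possibly compressed) extent range onto the async_cow list. |
| * The actual allocation and IO submission happens later in |
| * submit_compressed_extents, in the order the ranges were queued. |
| */ |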
| static noinline int add_async_extent(struct async_cow *cow, |
| u64 start, u64 ram_size, |
| u64 compressed_size, |
| struct page **pages, |
| unsigned long nr_pages, |
| int compress_type) |
| { |
| struct async_extent *async_extent; |
| |
| async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS); |
| BUG_ON(!async_extent); /* -ENOMEM */ |
| async_extent->start = start; |
| async_extent->ram_size = ram_size; |
| async_extent->compressed_size = compressed_size; |
| async_extent->pages = pages; |
| async_extent->nr_pages = nr_pages; |
| async_extent->compress_type = compress_type; |
| list_add_tail(&async_extent->list, &cow->extents); |
| return 0; |
| } |
| |
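| /* |
| * Decide whether writeback of this inode should go through the |
| * compression path: compress-force always wins, an inode flagged |
| * NOCOMPRESS never compresses, otherwise honor the compress mount |
| * option, the per-inode compress flag or the force_compress setting. |
| */ |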
| static inline int inode_need_compress(struct inode *inode) |
| { |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| |
| /* force compress */ |
| if (btrfs_test_opt(root, FORCE_COMPRESS)) |
| return 1; |
| /* bad compression ratios */ |
| if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) |
| return 0; |
| if (btrfs_test_opt(root, COMPRESS) || |
| BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS || |
| BTRFS_I(inode)->force_compress) |
| return 1; |
| return 0; |
| } |
| |
| /* |
| * we create compressed extents in two phases. The first |
| * phase compresses a range of pages that have already been |
| * locked (both pages and state bits are locked). |
| * |
| * This is done inside an ordered work queue, and the compression |
| * is spread across many cpus. The actual IO submission is step |
| * two, and the ordered work queue takes care of making sure that |
| * happens in the same order things were put onto the queue by |
| * writepages and friends. |
| * |
| * If this code finds it can't get good compression, it puts an |
| * entry onto the work queue to write the uncompressed bytes. This |
| * makes sure that both compressed inodes and uncompressed inodes |
| * are written in the same order that the flusher thread sent them |
| * down. |
| */ |
| static noinline void compress_file_range(struct inode *inode, |
| struct page *locked_page, |
| u64 start, u64 end, |
| struct async_cow *async_cow, |
| int *num_added) |
| { |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| u64 num_bytes; |
| u64 blocksize = root->sectorsize; |
| u64 actual_end; |
| u64 isize = i_size_read(inode); |
| int ret = 0; |
| struct page **pages = NULL; |
| unsigned long nr_pages; |
| unsigned long nr_pages_ret = 0; |
| unsigned long total_compressed = 0; |
| unsigned long total_in = 0; |
| unsigned long max_compressed = 128 * 1024; |
| unsigned long max_uncompressed = 128 * 1024; |
| int i; |
| int will_compress; |
| int compress_type = root->fs_info->compress_type; |
| int redirty = 0; |
| |
| /* if this is a small write inside eof, kick off a defrag */ |
| if ((end - start + 1) < 16 * 1024 && |
| (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) |
| btrfs_add_inode_defrag(NULL, inode); |
| |
| actual_end = min_t(u64, isize, end + 1); |
| again: |
| will_compress = 0; |
| nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1; |
| nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE); |
| |
| /* |
| * we don't want to send crud past the end of i_size through |
| * compression, that's just a waste of CPU time. So, if the |
| * end of the file is before the start of our current |
| * requested range of bytes, we bail out to the uncompressed |
| * cleanup code that can deal with all of this. |
| * |
| * It isn't really the fastest way to fix things, but this is a |
| * very uncommon corner. |
| */ |
| if (actual_end <= start) |
| goto cleanup_and_bail_uncompressed; |
| |
| total_compressed = actual_end - start; |
| |
| /* |
| * skip compression for a small file range (<= blocksize) that |
| * isn't an inline extent, since it doesn't save disk space at all. |
| */ |
| if (total_compressed <= blocksize && |
| (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) |
| goto cleanup_and_bail_uncompressed; |
| |
| /* we want to make sure that the amount of ram required to uncompress |
| * an extent is reasonable, so we limit the total size in ram |
| * of a compressed extent to 128k. This is a crucial number |
| * because it also controls how easily we can spread reads across |
| * cpus for decompression. |
| * |
| * We also want to make sure the amount of IO required to do |
| * a random read is reasonably small, so we limit the size of |
| * a compressed extent to 128k. |
| */ |
| total_compressed = min(total_compressed, max_uncompressed); |
| num_bytes = ALIGN(end - start + 1, blocksize); |
| num_bytes = max(blocksize, num_bytes); |
| total_in = 0; |
| ret = 0; |
| |
| /* |
| * we do compression for mount -o compress and when the |
| * inode has not been flagged as nocompress. This flag can |
| * change at any time if we discover bad compression ratios. |
| */ |
| if (inode_need_compress(inode)) { |
| WARN_ON(pages); |
| pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); |
| if (!pages) { |
| /* just bail out to the uncompressed code */ |
| goto cont; |
| } |
| |
| if (BTRFS_I(inode)->force_compress) |
| compress_type = BTRFS_I(inode)->force_compress; |
| |
| /* |
| * we need to call clear_page_dirty_for_io on each |
| * page in the range. Otherwise applications with the file |
| * mmap'd can wander in and change the page contents while |
| * we are compressing them. |
| * |
| * If the compression fails for any reason, we set the pages |
| * dirty again later on. |
| */ |
| extent_range_clear_dirty_for_io(inode, start, end); |
| redirty = 1; |
| ret = btrfs_compress_pages(compress_type, |
| inode->i_mapping, start, |
| total_compressed, pages, |
| nr_pages, &nr_pages_ret, |
| &total_in, |
| &total_compressed, |
| max_compressed); |
| |
| if (!ret) { |
| unsigned long offset = total_compressed & |
| (PAGE_CACHE_SIZE - 1); |
| struct page *page = pages[nr_pages_ret - 1]; |
| char *kaddr; |
| |
| /* zero the tail end of the last page, we might be |
| * sending it down to disk |
| */ |
| if (offset) { |
| kaddr = kmap_atomic(page); |
| memset(kaddr + offset, 0, |
| PAGE_CACHE_SIZE - offset); |
| kunmap_atomic(kaddr); |
| } |
| will_compress = 1; |
| } |
| } |
| cont: |
| if (start == 0) { |
| /* let's try to make an inline extent */ |
| if (ret || total_in < (actual_end - start)) { |
| /* we didn't compress the entire range, try |
| * to make an uncompressed inline extent. |
| */ |
| ret = cow_file_range_inline(root, inode, start, end, |
| 0, 0, NULL); |
| } else { |
| /* try making a compressed inline extent */ |
| ret = cow_file_range_inline(root, inode, start, end, |
| total_compressed, |
| compress_type, pages); |
| } |
| if (ret <= 0) { |
| unsigned long clear_flags = EXTENT_DELALLOC | |
| EXTENT_DEFRAG; |
| unsigned long page_error_op; |
| |
| clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0; |
| page_error_op = ret < 0 ? PAGE_SET_ERROR : 0; |
| |
| /* |
| * inline extent creation worked or returned error, |
| * we don't need to create any more async work items. |
| * Unlock and free up our temp pages. |
| */ |
| extent_clear_unlock_delalloc(inode, start, end, NULL, |
| clear_flags, PAGE_UNLOCK | |
| PAGE_CLEAR_DIRTY | |
| PAGE_SET_WRITEBACK | |
| page_error_op | |
| PAGE_END_WRITEBACK); |
| goto free_pages_out; |
| } |
| } |
| |
| if (will_compress) { |
| /* |
| * we aren't doing an inline extent, so round the compressed size |
| * up to a block size boundary so the allocator does sane |
| * things |
| */ |
| total_compressed = ALIGN(total_compressed, blocksize); |
| |
| /* |
| * one last check to make sure the compression is really a |
| * win, compare the page count read with the blocks on disk |
| */ |
| total_in = ALIGN(total_in, PAGE_CACHE_SIZE); |
| if (total_compressed >= total_in) { |
| will_compress = 0; |
| } else { |
| num_bytes = total_in; |
| } |
| } |
| if (!will_compress && pages) { |
| /* |
| * the compression code ran but failed to make things smaller, |
| * free any pages it allocated and our page pointer array |
| */ |
| for (i = 0; i < nr_pages_ret; i++) { |
| WARN_ON(pages[i]->mapping); |
| page_cache_release(pages[i]); |
| } |
| kfree(pages); |
| pages = NULL; |
| total_compressed = 0; |
| nr_pages_ret = 0; |
| |
| /* flag the file so we don't compress in the future */ |
| if (!btrfs_test_opt(root, FORCE_COMPRESS) && |
| !(BTRFS_I(inode)->force_compress)) { |
| BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS; |
| } |
| } |
| if (will_compress) { |
| *num_added += 1; |
| |
| /* the async work queues will take care of doing actual |
| * allocation on disk for these compressed pages, |
| * and will submit them to the elevator. |
| */ |
| add_async_extent(async_cow, start, num_bytes, |
| total_compressed, pages, nr_pages_ret, |
| compress_type); |
| |
| if (start + num_bytes < end) { |
| start += num_bytes; |
| pages = NULL; |
| cond_resched(); |
| goto again; |
| } |
| } else { |
| cleanup_and_bail_uncompressed: |
| /* |
| * No compression, but we still need to write the pages in |
| * the file we've been given so far. Redirty the locked |
| * page if it corresponds to our extent and set things up |
| * for the async work queue to run cow_file_range to do |
| * the normal delalloc dance. |
| */ |
| if (page_offset(locked_page) >= start && |
| page_offset(locked_page) <= end) { |
| __set_page_dirty_nobuffers(locked_page); |
| /* unlocked later on in the async handlers */ |
| } |
| if (redirty) |
| extent_range_redirty_for_io(inode, start, end); |
| add_async_extent(async_cow, start, end - start + 1, |
| 0, NULL, 0, BTRFS_COMPRESS_NONE); |
| *num_added += 1; |
| } |
| |
| return; |
| |
| free_pages_out: |
| for (i = 0; i < nr_pages_ret; i++) { |
| WARN_ON(pages[i]->mapping); |
| page_cache_release(pages[i]); |
| } |
| kfree(pages); |
| } |
| |
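| /* |
| * Release the compressed pages held by an async_extent and reset its |
| * page bookkeeping. |
| */ |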
| static void free_async_extent_pages(struct async_extent *async_extent) |
| { |
| int i; |
| |
| if (!async_extent->pages) |
| return; |
| |
| for (i = 0; i < async_extent->nr_pages; i++) { |
| WARN_ON(async_extent->pages[i]->mapping); |
| page_cache_release(async_extent->pages[i]); |
| } |
| kfree(async_extent->pages); |
| async_extent->nr_pages = 0; |
| async_extent->pages = NULL; |
| } |
| |
| /* |
| * phase two of compressed writeback. This is the ordered portion |
| * of the code, which only gets called in the order the work was |
| * queued. We walk all the async extents created by compress_file_range |
| * and send them down to the disk. |
| */ |
| static noinline void submit_compressed_extents(struct inode *inode, |
| struct async_cow *async_cow) |
| { |
| struct async_extent *async_extent; |
| u64 alloc_hint = 0; |
| struct btrfs_key ins; |
| struct extent_map *em; |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
| struct extent_io_tree *io_tree; |
| int ret = 0; |
| |
| again: |
| while (!list_empty(&async_cow->extents)) { |
| async_extent = list_entry(async_cow->extents.next, |
| struct async_extent, list); |
| list_del(&async_extent->list); |
| |
| io_tree = &BTRFS_I(inode)->io_tree; |
| |
| retry: |
| /* did the compression code fall back to uncompressed IO? */ |
| if (!async_extent->pages) { |
| int page_started = 0; |
| unsigned long nr_written = 0; |
| |
| lock_extent(io_tree, async_extent->start, |
| async_extent->start + |
| async_extent->ram_size - 1); |
| |
| /* allocate blocks */ |
| ret = cow_file_range(inode, async_cow->locked_page, |
| async_extent->start, |
| async_extent->start + |
| async_extent->ram_size - 1, |
| &page_started, &nr_written, 0); |
| |
| /* JDM XXX */ |
| |
| /* |
| * if page_started, cow_file_range inserted an |
| * inline extent and took care of all the unlocking |
| * and IO for us. Otherwise, we need to submit |
| * all those pages down to the drive. |
| */ |
| if (!page_started && !ret) |
| extent_write_locked_range(io_tree, |
| inode, async_extent->start, |
| async_extent->start + |
| async_extent->ram_size - 1, |
| btrfs_get_extent, |
| WB_SYNC_ALL); |
| else if (ret) |
| unlock_page(async_cow->locked_page); |
| kfree(async_extent); |
| cond_resched(); |
| continue; |
| } |
| |
| lock_extent(io_tree, async_extent->start, |
| async_extent->start + async_extent->ram_size - 1); |
| |
| ret = btrfs_reserve_extent(root, |
| async_extent->compressed_size, |
| async_extent->compressed_size, |
| 0, alloc_hint, &ins, 1, 1); |
| if (ret) { |
| free_async_extent_pages(async_extent); |
| |
| if (ret == -ENOSPC) { |
| unlock_extent(io_tree, async_extent->start, |
| async_extent->start + |
| async_extent->ram_size - 1); |
| |
| /* |
| * we need to redirty the pages if we decide to |
| * fall back to uncompressed IO, otherwise we |
| * will not submit these pages down to the lower |
| * layers. |
| */ |
| extent_range_redirty_for_io(inode, |
| async_extent->start, |
| async_extent->start + |
| async_extent->ram_size - 1); |
| |
| goto retry; |
| } |
| goto out_free; |
| } |
| /* |
| * here we're doing allocation and writeback of the |
| * compressed pages |
| */ |
| btrfs_drop_extent_cache(inode, async_extent->start, |
| async_extent->start + |
| async_extent->ram_size - 1, 0); |
| |
| em = alloc_extent_map(); |
| if (!em) { |
| ret = -ENOMEM; |
| goto out_free_reserve; |
| } |
| em->start = async_extent->start; |
| em->len = async_extent->ram_size; |
| em->orig_start = em->start; |
| em->mod_start = em->start; |
| em->mod_len = em->len; |
| |
| em->block_start = ins.objectid; |
| em->block_len = ins.offset; |
| em->orig_block_len = ins.offset; |
| em->ram_bytes = async_extent->ram_size; |
| em->bdev = root->fs_info->fs_devices->latest_bdev; |
| em->compress_type = async_extent->compress_type; |
| set_bit(EXTENT_FLAG_PINNED, &em->flags); |
| set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); |
| em->generation = -1; |
| |
| while (1) { |
| write_lock(&em_tree->lock); |
| ret = add_extent_mapping(em_tree, em, 1); |
| write_unlock(&em_tree->lock); |
| if (ret != -EEXIST) { |
| free_extent_map(em); |
| break; |
| } |
| btrfs_drop_extent_cache(inode, async_extent->start, |
| async_extent->start + |
| async_extent->ram_size - 1, 0); |
| } |
| |
| if (ret) |
| goto out_free_reserve; |
| |
| ret = btrfs_add_ordered_extent_compress(inode, |
| async_extent->start, |
| ins.objectid, |
| async_extent->ram_size, |
| ins.offset, |
| BTRFS_ORDERED_COMPRESSED, |
| async_extent->compress_type); |
| if (ret) { |
| btrfs_drop_extent_cache(inode, async_extent->start, |
| async_extent->start + |
| async_extent->ram_size - 1, 0); |
| goto out_free_reserve; |
| } |
| |
| /* |
| * clear dirty, set writeback and unlock the pages. |
| */ |
| extent_clear_unlock_delalloc(inode, async_extent->start, |
| async_extent->start + |
| async_extent->ram_size - 1, |
| NULL, EXTENT_LOCKED | EXTENT_DELALLOC, |
| PAGE_UNLOCK | PAGE_CLEAR_DIRTY | |
| PAGE_SET_WRITEBACK); |
| ret = btrfs_submit_compressed_write(inode, |
| async_extent->start, |
| async_extent->ram_size, |
| ins.objectid, |
| ins.offset, async_extent->pages, |
| async_extent->nr_pages); |
| if (ret) { |
| struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; |
| struct page *p = async_extent->pages[0]; |
| const u64 start = async_extent->start; |
| const u64 end = start + async_extent->ram_size - 1; |
| |
| p->mapping = inode->i_mapping; |
| tree->ops->writepage_end_io_hook(p, start, end, |
| NULL, 0); |
| p->mapping = NULL; |
| extent_clear_unlock_delalloc(inode, start, end, NULL, 0, |
| PAGE_END_WRITEBACK | |
| PAGE_SET_ERROR); |
| free_async_extent_pages(async_extent); |
| } |
| alloc_hint = ins.objectid + ins.offset; |
| kfree(async_extent); |
| cond_resched(); |
| } |
| return; |
| out_free_reserve: |
| btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); |
| out_free: |
| extent_clear_unlock_delalloc(inode, async_extent->start, |
| async_extent->start + |
| async_extent->ram_size - 1, |
| NULL, EXTENT_LOCKED | EXTENT_DELALLOC | |
| EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING, |
| PAGE_UNLOCK | PAGE_CLEAR_DIRTY | |
| PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK | |
| PAGE_SET_ERROR); |
| free_async_extent_pages(async_extent); |
| kfree(async_extent); |
| goto again; |
| } |
| |
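| /* |
| * Look up the extent map around @start and use its block start as an |
| * allocation hint, falling back to the first mapped block of the inode |
| * if that mapping doesn't point at a real block number. |
| */ |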
| static u64 get_extent_allocation_hint(struct inode *inode, u64 start, |
| u64 num_bytes) |
| { |
| struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
| struct extent_map *em; |
| u64 alloc_hint = 0; |
| |
| read_lock(&em_tree->lock); |
| em = search_extent_mapping(em_tree, start, num_bytes); |
| if (em) { |
| /* |
| * if block start isn't an actual block number then find the |
| * first block in this inode and use that as a hint. If that |
| * block is also bogus then just don't worry about it. |
| */ |
| if (em->block_start >= EXTENT_MAP_LAST_BYTE) { |
| free_extent_map(em); |
| em = search_extent_mapping(em_tree, 0, 0); |
| if (em && em->block_start < EXTENT_MAP_LAST_BYTE) |
| alloc_hint = em->block_start; |
| if (em) |
| free_extent_map(em); |
| } else { |
| alloc_hint = em->block_start; |
| free_extent_map(em); |
| } |
| } |
| read_unlock(&em_tree->lock); |
| |
| return alloc_hint; |
| } |
| |
| /* |
| * when extent_io.c finds a delayed allocation range in the file, |
| * the callbacks end up in this code. The basic idea is to |
| * allocate extents on disk for the range, and create ordered data structs |
| * in ram to track those extents. |
| * |
| * locked_page is the page that writepage had locked already. We use |
| * it to make sure we don't do extra locks or unlocks. |
| * |
| * *page_started is set to one if we unlock locked_page and do everything |
| * required to start IO on it. It may be clean and already done with |
| * IO when we return. |
| */ |
| static noinline int cow_file_range(struct inode *inode, |
| struct page *locked_page, |
| u64 start, u64 end, int *page_started, |
| unsigned long *nr_written, |
| int unlock) |
| { |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| u64 alloc_hint = 0; |
| u64 num_bytes; |
| unsigned long ram_size; |
| u64 disk_num_bytes; |
| u64 cur_alloc_size; |
| u64 blocksize = root->sectorsize; |
| struct btrfs_key ins; |
| struct extent_map *em; |
| struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
| int ret = 0; |
| |
| if (btrfs_is_free_space_inode(inode)) { |
| WARN_ON_ONCE(1); |
| ret = -EINVAL; |
| goto out_unlock; |
| } |
| |
| num_bytes = ALIGN(end - start + 1, blocksize); |
| num_bytes = max(blocksize, num_bytes); |
| disk_num_bytes = num_bytes; |
| |
| /* if this is a small write inside eof, kick off defrag */ |
| if (num_bytes < 64 * 1024 && |
| (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) |
| btrfs_add_inode_defrag(NULL, inode); |
| |
| if (start == 0) { |
| /* let's try to make an inline extent */ |
| ret = cow_file_range_inline(root, inode, start, end, 0, 0, |
| NULL); |
| if (ret == 0) { |
| extent_clear_unlock_delalloc(inode, start, end, NULL, |
| EXTENT_LOCKED | EXTENT_DELALLOC | |
| EXTENT_DEFRAG, PAGE_UNLOCK | |
| PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK | |
| PAGE_END_WRITEBACK); |
| |
| *nr_written = *nr_written + |
| (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE; |
| *page_started = 1; |
| goto out; |
| } else if (ret < 0) { |
| goto out_unlock; |
| } |
| } |
| |
| BUG_ON(disk_num_bytes > |
| btrfs_super_total_bytes(root->fs_info->super_copy)); |
| |
| alloc_hint = get_extent_allocation_hint(inode, start, num_bytes); |
| btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); |
| |
| while (disk_num_bytes > 0) { |
| unsigned long op; |
| |
| cur_alloc_size = disk_num_bytes; |
| ret = btrfs_reserve_extent(root, cur_alloc_size, |
| root->sectorsize, 0, alloc_hint, |
| &ins, 1, 1); |
| if (ret < 0) |
| goto out_unlock; |
| |
| em = alloc_extent_map(); |
| if (!em) { |
| ret = -ENOMEM; |
| goto out_reserve; |
| } |
| em->start = start; |
| em->orig_start = em->start; |
| ram_size = ins.offset; |
| em->len = ins.offset; |
| em->mod_start = em->start; |
| em->mod_len = em->len; |
| |
| em->block_start = ins.objectid; |
| em->block_len = ins.offset; |
| em->orig_block_len = ins.offset; |
| em->ram_bytes = ram_size; |
| em->bdev = root->fs_info->fs_devices->latest_bdev; |
| set_bit(EXTENT_FLAG_PINNED, &em->flags); |
| em->generation = -1; |
| |
| while (1) { |
| write_lock(&em_tree->lock); |
| ret = add_extent_mapping(em_tree, em, 1); |
| write_unlock(&em_tree->lock); |
| if (ret != -EEXIST) { |
| free_extent_map(em); |
| break; |
| } |
| btrfs_drop_extent_cache(inode, start, |
| start + ram_size - 1, 0); |
| } |
| if (ret) |
| goto out_reserve; |
| |
| cur_alloc_size = ins.offset; |
| ret = btrfs_add_ordered_extent(inode, start, ins.objectid, |
| ram_size, cur_alloc_size, 0); |
| if (ret) |
| goto out_drop_extent_cache; |
| |
| if (root->root_key.objectid == |
| BTRFS_DATA_RELOC_TREE_OBJECTID) { |
| ret = btrfs_reloc_clone_csums(inode, start, |
| cur_alloc_size); |
| if (ret) |
| goto out_drop_extent_cache; |
| } |
| |
| if (disk_num_bytes < cur_alloc_size) |
| break; |
| |
| /* we're not doing compressed IO, don't unlock the first |
| * page (which the caller expects to stay locked), don't |
| * clear any dirty bits and don't set any writeback bits |
| * |
| * Do set the Private2 bit so we know this page was properly |
| * setup for writepage |
| */ |
| op = unlock ? PAGE_UNLOCK : 0; |
| op |= PAGE_SET_PRIVATE2; |
| |
| extent_clear_unlock_delalloc(inode, start, |
| start + ram_size - 1, locked_page, |
| EXTENT_LOCKED | EXTENT_DELALLOC, |
| op); |
| disk_num_bytes -= cur_alloc_size; |
| num_bytes -= cur_alloc_size; |
| alloc_hint = ins.objectid + ins.offset; |
| start += cur_alloc_size; |
| } |
| out: |
| return ret; |
| |
| out_drop_extent_cache: |
| btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0); |
| out_reserve: |
| btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); |
| out_unlock: |
| extent_clear_unlock_delalloc(inode, start, end, locked_page, |
| EXTENT_LOCKED | EXTENT_DO_ACCOUNTING | |
| EXTENT_DELALLOC | EXTENT_DEFRAG, |
| PAGE_UNLOCK | PAGE_CLEAR_DIRTY | |
| PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK); |
| goto out; |
| } |
| |
| /* |
| * work queue callback to start compression on a file and its pages |
| */ |
| static noinline void async_cow_start(struct btrfs_work *work) |
| { |
| struct async_cow *async_cow; |
| int num_added = 0; |
| async_cow = container_of(work, struct async_cow, work); |
| |
| compress_file_range(async_cow->inode, async_cow->locked_page, |
| async_cow->start, async_cow->end, async_cow, |
| &num_added); |
| if (num_added == 0) { |
| btrfs_add_delayed_iput(async_cow->inode); |
| async_cow->inode = NULL; |
| } |
| } |
| |
| /* |
| * work queue callback to submit previously compressed pages |
| */ |
| static noinline void async_cow_submit(struct btrfs_work *work) |
| { |
| struct async_cow *async_cow; |
| struct btrfs_root *root; |
| unsigned long nr_pages; |
| |
| async_cow = container_of(work, struct async_cow, work); |
| |
| root = async_cow->root; |
| nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >> |
| PAGE_CACHE_SHIFT; |
| |
| /* |
| * atomic_sub_return implies a barrier for waitqueue_active |
| */ |
| if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) < |
| 5 * 1024 * 1024 && |
| waitqueue_active(&root->fs_info->async_submit_wait)) |
| wake_up(&root->fs_info->async_submit_wait); |
| |
| if (async_cow->inode) |
| submit_compressed_extents(async_cow->inode, async_cow); |
| } |
| |
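| /* |
| * work queue callback to free the async_cow struct once the work is |
| * done, dropping the delayed iput reference on the inode if it is |
| * still held |
| */ |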
| static noinline void async_cow_free(struct btrfs_work *work) |
| { |
| struct async_cow *async_cow; |
| async_cow = container_of(work, struct async_cow, work); |
| if (async_cow->inode) |
| btrfs_add_delayed_iput(async_cow->inode); |
| kfree(async_cow); |
| } |
| |
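| /* |
| * Kick off the async delalloc path: split the range into chunks (at |
| * most 512k when compression is enabled), queue one async_cow work |
| * item per chunk and throttle once too many async delalloc pages are |
| * in flight. |
| */ |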
| static int cow_file_range_async(struct inode *inode, struct page *locked_page, |
| u64 start, u64 end, int *page_started, |
| unsigned long *nr_written) |
| { |
| struct async_cow *async_cow; |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| unsigned long nr_pages; |
| u64 cur_end; |
| int limit = 10 * 1024 * 1024; |
| |
| clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED, |
| 1, 0, NULL, GFP_NOFS); |
| while (start < end) { |
| async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); |
| BUG_ON(!async_cow); /* -ENOMEM */ |
| async_cow->inode = igrab(inode); |
| async_cow->root = root; |
| async_cow->locked_page = locked_page; |
| async_cow->start = start; |
| |
| if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS && |
| !btrfs_test_opt(root, FORCE_COMPRESS)) |
| cur_end = end; |
| else |
| cur_end = min(end, start + 512 * 1024 - 1); |
| |
| async_cow->end = cur_end; |
| INIT_LIST_HEAD(&async_cow->extents); |
| |
| btrfs_init_work(&async_cow->work, |
| btrfs_delalloc_helper, |
| async_cow_start, async_cow_submit, |
| async_cow_free); |
| |
| nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >> |
| PAGE_CACHE_SHIFT; |
| atomic_add(nr_pages, &root->fs_info->async_delalloc_pages); |
| |
| btrfs_queue_work(root->fs_info->delalloc_workers, |
| &async_cow->work); |
| |
| if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) { |
| wait_event(root->fs_info->async_submit_wait, |
| (atomic_read(&root->fs_info->async_delalloc_pages) < |
| limit)); |
| } |
| |
| while (atomic_read(&root->fs_info->async_submit_draining) && |
| atomic_read(&root->fs_info->async_delalloc_pages)) { |
| wait_event(root->fs_info->async_submit_wait, |
| (atomic_read(&root->fs_info->async_delalloc_pages) == |
| 0)); |
| } |
| |
| *nr_written += nr_pages; |
| start = cur_end + 1; |
| } |
| *page_started = 1; |
| return 0; |
| } |
| |
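| /* |
| * Return 1 if any checksums exist for the given byte range, 0 if none |
| * do. Used by the nocow path to decide when a write must fall back |
| * to COW. |
| */ |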
| static noinline int csum_exist_in_range(struct btrfs_root *root, |
| u64 bytenr, u64 num_bytes) |
| { |
| int ret; |
| struct btrfs_ordered_sum *sums; |
| LIST_HEAD(list); |
| |
| ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr, |
| bytenr + num_bytes - 1, &list, 0); |
| if (ret == 0 && list_empty(&list)) |
| return 0; |
| |
| while (!list_empty(&list)) { |
| sums = list_entry(list.next, struct btrfs_ordered_sum, list); |
| list_del(&sums->list); |
| kfree(sums); |
| } |
| return 1; |
| } |
| |
| /* |
| * Called for the nocow writeback path. This checks for snapshots or COW copies |
| * of the extents that exist in the file, and COWs the file as required. |
| * |
| * If no cow copies or snapshots exist, we write directly to the existing |
| * blocks on disk |
| */ |
| static noinline int run_delalloc_nocow(struct inode *inode, |
| struct page *locked_page, |
| u64 start, u64 end, int *page_started, int force, |
| unsigned long *nr_written) |
| { |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| struct btrfs_trans_handle *trans; |
| struct extent_buffer *leaf; |
| struct btrfs_path *path; |
| struct btrfs_file_extent_item *fi; |
| struct btrfs_key found_key; |
| u64 cow_start; |
| u64 cur_offset; |
| u64 extent_end; |
| u64 extent_offset; |
| u64 disk_bytenr; |
| u64 num_bytes; |
| u64 disk_num_bytes; |
| u64 ram_bytes; |
| int extent_type; |
| int ret, err; |
| int type; |
| int nocow; |
| int check_prev = 1; |
| bool nolock; |
| u64 ino = btrfs_ino(inode); |
| |
| path = btrfs_alloc_path(); |
| if (!path) { |
| extent_clear_unlock_delalloc(inode, start, end, locked_page, |
| EXTENT_LOCKED | EXTENT_DELALLOC | |
| EXTENT_DO_ACCOUNTING | |
| EXTENT_DEFRAG, PAGE_UNLOCK | |
| PAGE_CLEAR_DIRTY | |
| PAGE_SET_WRITEBACK | |
| PAGE_END_WRITEBACK); |
| return -ENOMEM; |
| } |
| |
| nolock = btrfs_is_free_space_inode(inode); |
| |
| if (nolock) |
| trans = btrfs_join_transaction_nolock(root); |
| else |
| trans = btrfs_join_transaction(root); |
| |
| if (IS_ERR(trans)) { |
| extent_clear_unlock_delalloc(inode, start, end, locked_page, |
| EXTENT_LOCKED | EXTENT_DELALLOC | |
| EXTENT_DO_ACCOUNTING | |
| EXTENT_DEFRAG, PAGE_UNLOCK | |
| PAGE_CLEAR_DIRTY | |
| PAGE_SET_WRITEBACK | |
| PAGE_END_WRITEBACK); |
| btrfs_free_path(path); |
| return PTR_ERR(trans); |
| } |
| |
| trans->block_rsv = &root->fs_info->delalloc_block_rsv; |
| |
| cow_start = (u64)-1; |
| cur_offset = start; |
| while (1) { |
| ret = btrfs_lookup_file_extent(trans, root, path, ino, |
| cur_offset, 0); |
| if (ret < 0) |
| goto error; |
| if (ret > 0 && path->slots[0] > 0 && check_prev) { |
| leaf = path->nodes[0]; |
| btrfs_item_key_to_cpu(leaf, &found_key, |
| path->slots[0] - 1); |
| if (found_key.objectid == ino && |
| found_key.type == BTRFS_EXTENT_DATA_KEY) |
| path->slots[0]--; |
| } |
| check_prev = 0; |
| next_slot: |
| leaf = path->nodes[0]; |
| if (path->slots[0] >= btrfs_header_nritems(leaf)) { |
| ret = btrfs_next_leaf(root, path); |
| if (ret < 0) |
| goto error; |
| if (ret > 0) |
| break; |
| leaf = path->nodes[0]; |
| } |
| |
| nocow = 0; |
| disk_bytenr = 0; |
| num_bytes = 0; |
| btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
| |
| if (found_key.objectid > ino) |
| break; |
| if (WARN_ON_ONCE(found_key.objectid < ino) || |
| found_key.type < BTRFS_EXTENT_DATA_KEY) { |
| path->slots[0]++; |
| goto next_slot; |
| } |
| if (found_key.type > BTRFS_EXTENT_DATA_KEY || |
| found_key.offset > end) |
| break; |
| |
| if (found_key.offset > cur_offset) { |
| extent_end = found_key.offset; |
| extent_type = 0; |
| goto out_check; |
| } |
| |
| fi = btrfs_item_ptr(leaf, path->slots[0], |
| struct btrfs_file_extent_item); |
| extent_type = btrfs_file_extent_type(leaf, fi); |
| |
| ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); |
| if (extent_type == BTRFS_FILE_EXTENT_REG || |
| extent_type == BTRFS_FILE_EXTENT_PREALLOC) { |
| disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); |
| extent_offset = btrfs_file_extent_offset(leaf, fi); |
| extent_end = found_key.offset + |
| btrfs_file_extent_num_bytes(leaf, fi); |
| disk_num_bytes = |
| btrfs_file_extent_disk_num_bytes(leaf, fi); |
| if (extent_end <= start) { |
| path->slots[0]++; |
| goto next_slot; |
| } |
| if (disk_bytenr == 0) |
| goto out_check; |
| if (btrfs_file_extent_compression(leaf, fi) || |
| btrfs_file_extent_encryption(leaf, fi) || |
| btrfs_file_extent_other_encoding(leaf, fi)) |
| goto out_check; |
| if (extent_type == BTRFS_FILE_EXTENT_REG && !force) |
| goto out_check; |
| if (btrfs_extent_readonly(root, disk_bytenr)) |
| goto out_check; |
| if (btrfs_cross_ref_exist(trans, root, ino, |
| found_key.offset - |
| extent_offset, disk_bytenr)) |
| goto out_check; |
| disk_bytenr += extent_offset; |
| disk_bytenr += cur_offset - found_key.offset; |
| num_bytes = min(end + 1, extent_end) - cur_offset; |
| /* |
| * if there are pending snapshots for this root, |
| * we fall back to the common COW path. |
| */ |
| if (!nolock) { |
| err = btrfs_start_write_no_snapshoting(root); |
| if (!err) |
| goto out_check; |
| } |
| /* |
| * force COW if csums exist in the range. |
| * This ensures that the csums for a given extent are |
| * either valid or do not exist. |
| */ |
| if (csum_exist_in_range(root, disk_bytenr, num_bytes)) |
| goto out_check; |
| nocow = 1; |
| } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { |
| extent_end = found_key.offset + |
| btrfs_file_extent_inline_len(leaf, |
| path->slots[0], fi); |
| extent_end = ALIGN(extent_end, root->sectorsize); |
| } else { |
| BUG_ON(1); |
| } |
| out_check: |
| if (extent_end <= start) { |
| path->slots[0]++; |
| if (!nolock && nocow) |
| btrfs_end_write_no_snapshoting(root); |
| goto next_slot; |
| } |
| if (!nocow) { |
| if (cow_start == (u64)-1) |
| cow_start = cur_offset; |
| cur_offset = extent_end; |
| if (cur_offset > end) |
| break; |
| path->slots[0]++; |
| goto next_slot; |
| } |
| |
| btrfs_release_path(path); |
| if (cow_start != (u64)-1) { |
| ret = cow_file_range(inode, locked_page, |
| cow_start, found_key.offset - 1, |
| page_started, nr_written, 1); |
| if (ret) { |
| if (!nolock && nocow) |
| btrfs_end_write_no_snapshoting(root); |
| goto error; |
| } |
| cow_start = (u64)-1; |
| } |
| |
| if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) { |
| struct extent_map *em; |
| struct extent_map_tree *em_tree; |
| em_tree = &BTRFS_I(inode)->extent_tree; |
| em = alloc_extent_map(); |
| BUG_ON(!em); /* -ENOMEM */ |
| em->start = cur_offset; |
| em->orig_start = found_key.offset - extent_offset; |
| em->len = num_bytes; |
| em->block_len = num_bytes; |
| em->block_start = disk_bytenr; |
| em->orig_block_len = disk_num_bytes; |
| em->ram_bytes = ram_bytes; |
| em->bdev = root->fs_info->fs_devices->latest_bdev; |
| em->mod_start = em->start; |
| em->mod_len = em->len; |
| set_bit(EXTENT_FLAG_PINNED, &em->flags); |
| set_bit(EXTENT_FLAG_FILLING, &em->flags); |
| em->generation = -1; |
| while (1) { |
| write_lock(&em_tree->lock); |
| ret = add_extent_mapping(em_tree, em, 1); |
| write_unlock(&em_tree->lock); |
| if (ret != -EEXIST) { |
| free_extent_map(em); |
| break; |
| } |
| btrfs_drop_extent_cache(inode, em->start, |
| em->start + em->len - 1, 0); |
| } |
| type = BTRFS_ORDERED_PREALLOC; |
| } else { |
| type = BTRFS_ORDERED_NOCOW; |
| } |
| |
| ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr, |
| num_bytes, num_bytes, type); |
| BUG_ON(ret); /* -ENOMEM */ |
| |
| if (root->root_key.objectid == |
| BTRFS_DATA_RELOC_TREE_OBJECTID) { |
| ret = btrfs_reloc_clone_csums(inode, cur_offset, |
| num_bytes); |
| if (ret) { |
| if (!nolock && nocow) |
| btrfs_end_write_no_snapshoting(root); |
| goto error; |
| } |
| } |
| |
| extent_clear_unlock_delalloc(inode, cur_offset, |
| cur_offset + num_bytes - 1, |
| locked_page, EXTENT_LOCKED | |
| EXTENT_DELALLOC, PAGE_UNLOCK | |
| PAGE_SET_PRIVATE2); |
| if (!nolock && nocow) |
| btrfs_end_write_no_snapshoting(root); |
| cur_offset = extent_end; |
| if (cur_offset > end) |
| break; |
| } |
| btrfs_release_path(path); |
| |
| if (cur_offset <= end && cow_start == (u64)-1) { |
| cow_start = cur_offset; |
| cur_offset = end; |
| } |
| |
| if (cow_start != (u64)-1) { |
| ret = cow_file_range(inode, locked_page, cow_start, end, |
| page_started, nr_written, 1); |
| if (ret) |
| goto error; |
| } |
| |
| error: |
| err = btrfs_end_transaction(trans, root); |
| if (!ret) |
| ret = err; |
| |
| if (ret && cur_offset < end) |
| extent_clear_unlock_delalloc(inode, cur_offset, end, |
| locked_page, EXTENT_LOCKED | |
| EXTENT_DELALLOC | EXTENT_DEFRAG | |
| EXTENT_DO_ACCOUNTING, PAGE_UNLOCK | |
| PAGE_CLEAR_DIRTY | |
| PAGE_SET_WRITEBACK | |
| PAGE_END_WRITEBACK); |
| btrfs_free_path(path); |
| return ret; |
| } |
| |
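| /* |
| * Return 1 if the range must be COWed even though the inode is marked |
| * NODATACOW or prealloc, which currently only happens when the range |
| * is being defragged. |
| */ |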
| static inline int need_force_cow(struct inode *inode, u64 start, u64 end) |
| { |
| |
| if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && |
| !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) |
| return 0; |
| |
| /* |
| * @defrag_bytes is a hint value; no spinlock is held here. |
| * If it is not zero, it means the file is being defragged. |
| * Force COW if the given extent needs to be defragged. |
| */ |
| if (BTRFS_I(inode)->defrag_bytes && |
| test_range_bit(&BTRFS_I(inode)->io_tree, start, end, |
| EXTENT_DEFRAG, 0, NULL)) |
| return 1; |
| |
| return 0; |
| } |
| |
| /* |
| * extent_io.c callback to do delayed allocation processing |
| */ |
| static int run_delalloc_range(struct inode *inode, struct page *locked_page, |
| u64 start, u64 end, int *page_started, |
| unsigned long *nr_written) |
| { |
| int ret; |
| int force_cow = need_force_cow(inode, start, end); |
| |
| if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) { |
| ret = run_delalloc_nocow(inode, locked_page, start, end, |
| page_started, 1, nr_written); |
| } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) { |
| ret = run_delalloc_nocow(inode, locked_page, start, end, |
| page_started, 0, nr_written); |
| } else if (!inode_need_compress(inode)) { |
| ret = cow_file_range(inode, locked_page, start, end, |
| page_started, nr_written, 1); |
| } else { |
| set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, |
| &BTRFS_I(inode)->runtime_flags); |
| ret = cow_file_range_async(inode, locked_page, start, end, |
| page_started, nr_written); |
| } |
| return ret; |
| } |
| |
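| /* |
| * extent_io.c split_extent_hook, called when a delalloc extent state |
| * is split in two. If covering the two halves needs more outstanding |
| * extents than the original range accounted for, bump the count. |
| */ |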
| static void btrfs_split_extent_hook(struct inode *inode, |
| struct extent_state *orig, u64 split) |
| { |
| u64 size; |
| |
| /* not delalloc, ignore it */ |
| if (!(orig->state & EXTENT_DELALLOC)) |
| return; |
| |
| size = orig->end - orig->start + 1; |
| if (size > BTRFS_MAX_EXTENT_SIZE) { |
| u64 num_extents; |
| u64 new_size; |
| |
| /* |
| * See the explanation in btrfs_merge_extent_hook, the same |
| * applies here, just in reverse. |
| */ |
| new_size = orig->end - split + 1; |
| num_extents = div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, |
| BTRFS_MAX_EXTENT_SIZE); |
| new_size = split - orig->start; |
| num_extents += div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, |
| BTRFS_MAX_EXTENT_SIZE); |
| if (div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, |
| BTRFS_MAX_EXTENT_SIZE) >= num_extents) |
| return; |
| } |
| |
| spin_lock(&BTRFS_I(inode)->lock); |
| BTRFS_I(inode)->outstanding_extents++; |
| spin_unlock(&BTRFS_I(inode)->lock); |
| } |
| |
| /* |
| * extent_io.c merge_extent_hook, used to track merged delayed allocation |
| * extents so we can keep track of new extents that are just merged onto old |
| * extents, such as when we are doing sequential writes, so we can properly |
| * account for the metadata space we'll need. |
| */ |
| static void btrfs_merge_extent_hook(struct inode *inode, |
| struct extent_state *new, |
| struct extent_state *other) |
| { |
| u64 new_size, old_size; |
| u64 num_extents; |
| |
| /* not delalloc, ignore it */ |
| if (!(other->state & EXTENT_DELALLOC)) |
| return; |
| |
| if (new->start > other->start) |
| new_size = new->end - other->start + 1; |
| else |
| new_size = other->end - new->start + 1; |
| |
| /* we're not bigger than the max, unreserve the space and go */ |
| if (new_size <= BTRFS_MAX_EXTENT_SIZE) { |
| spin_lock(&BTRFS_I(inode)->lock); |
| BTRFS_I(inode)->outstanding_extents--; |
| spin_unlock(&BTRFS_I(inode)->lock); |
| return; |
| } |
| |
| /* |
| * We have to add up either side to figure out how many extents were |
| * accounted for before we merged into one big extent. If the number of |
| * extents we accounted for is <= the amount we need for the new range |
| * then we can return, otherwise drop. Think of it like this |
| * |
| * [ 4k][MAX_SIZE] |
| * |
| * So we've grown the extent by a MAX_SIZE extent, this would mean we |
| * need 2 outstanding extents, on one side we have 1 and the other side |
| * we have 1 so they are == and we can return. But in this case |
| * |
| * [MAX_SIZE+4k][MAX_SIZE+4k] |
| * |
| * Each range on their own accounts for 2 extents, but merged together |
| * they are only 3 extents worth of accounting, so we need to drop in |
| * this case. |
| */ |
| old_size = other->end - other->start + 1; |
| num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1, |
| BTRFS_MAX_EXTENT_SIZE); |
| old_size = new->end - new->start + 1; |
| num_extents += div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1, |
| BTRFS_MAX_EXTENT_SIZE); |
| |
| if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, |
| BTRFS_MAX_EXTENT_SIZE) >= num_extents) |
| return; |
| |
| spin_lock(&BTRFS_I(inode)->lock); |
| BTRFS_I(inode)->outstanding_extents--; |
| spin_unlock(&BTRFS_I(inode)->lock); |
| } |
| |
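| /* |
| * Put this inode on the per-root list of inodes with pending delalloc |
| * and, the first time the root gains such an inode, put the root on |
| * the fs-wide delalloc roots list so the flush code can find it. |
| */ |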
| static void btrfs_add_delalloc_inodes(struct btrfs_root *root, |
| struct inode *inode) |
| { |
| spin_lock(&root->delalloc_lock); |
| if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) { |
| list_add_tail(&BTRFS_I(inode)->delalloc_inodes, |
| &root->delalloc_inodes); |
| set_bit(BTRFS_INODE_IN_DELALLOC_LIST, |
| &BTRFS_I(inode)->runtime_flags); |
| root->nr_delalloc_inodes++; |
| if (root->nr_delalloc_inodes == 1) { |
| spin_lock(&root->fs_info->delalloc_root_lock); |
| BUG_ON(!list_empty(&root->delalloc_root)); |
| list_add_tail(&root->delalloc_root, |
| &root->fs_info->delalloc_roots); |
| spin_unlock(&root->fs_info->delalloc_root_lock); |
| } |
| } |
| spin_unlock(&root->delalloc_lock); |
| } |
| |
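| /* |
| * Undo btrfs_add_delalloc_inodes: drop the inode from the per-root |
| * delalloc list and, if it was the last one, drop the root from the |
| * fs-wide delalloc roots list. |
| */ |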
| static void btrfs_del_delalloc_inode(struct btrfs_root *root, |
| struct inode *inode) |
| { |
| spin_lock(&root->delalloc_lock); |
| if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) { |
| list_del_init(&BTRFS_I(inode)->delalloc_inodes); |
| clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, |
| &BTRFS_I(inode)->runtime_flags); |
| root->nr_delalloc_inodes--; |
| if (!root->nr_delalloc_inodes) { |
| spin_lock(&root->fs_info->delalloc_root_lock); |
| BUG_ON(list_empty(&root->delalloc_root)); |
| list_del_init(&root->delalloc_root); |
| spin_unlock(&root->fs_info->delalloc_root_lock); |
| } |
| } |
| spin_unlock(&root->delalloc_lock); |
| } |
| |
| /* |
| * extent_io.c set_bit_hook, used to track delayed allocation |
| * bytes in this file, and to maintain the list of inodes that |
| * have pending delalloc work to be done. |
| */ |
| static void btrfs_set_bit_hook(struct inode *inode, |
| struct extent_state *state, unsigned *bits) |
| { |
| |
| if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC)) |
| WARN_ON(1); |
| /* |
| * set_bit and clear bit hooks normally require _irqsave/restore |
| * but in this case, we are only testing for the DELALLOC |
| * bit, which is only set or cleared with irqs on |
| */ |
| if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| u64 len = state->end + 1 - state->start; |
| bool do_list = !btrfs_is_free_space_inode(inode); |
| |
| if (*bits & EXTENT_FIRST_DELALLOC) { |
| *bits &= ~EXTENT_FIRST_DELALLOC; |
| } else { |
| spin_lock(&BTRFS_I(inode)->lock); |
| BTRFS_I(inode)->outstanding_extents++; |
| spin_unlock(&BTRFS_I(inode)->lock); |
| } |
| |
| /* For sanity tests */ |
| if (btrfs_test_is_dummy_root(root)) |
| return; |
| |
| __percpu_counter_add(&root->fs_info->delalloc_bytes, len, |
| root->fs_info->delalloc_batch); |
| spin_lock(&BTRFS_I(inode)->lock); |
| BTRFS_I(inode)->delalloc_bytes += len; |
| if (*bits & EXTENT_DEFRAG) |
| BTRFS_I(inode)->defrag_bytes += len; |
| if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST, |
| &BTRFS_I(inode)->runtime_flags)) |
| btrfs_add_delalloc_inodes(root, inode); |
| spin_unlock(&BTRFS_I(inode)->lock); |
| } |
| } |
| |
| /* |
| * extent_io.c clear_bit_hook, see set_bit_hook for why |
| */ |
| static void btrfs_clear_bit_hook(struct inode *inode, |
| struct extent_state *state, |
| unsigned *bits) |
| { |
| u64 len = state->end + 1 - state->start; |
| u64 num_extents = div64_u64(len + BTRFS_MAX_EXTENT_SIZE - 1, |
| BTRFS_MAX_EXTENT_SIZE); |
| |
| spin_lock(&BTRFS_I(inode)->lock); |
| if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) |
| BTRFS_I(inode)->defrag_bytes -= len; |
| spin_unlock(&BTRFS_I(inode)->lock); |
| |
| /* |
| * set_bit and clear bit hooks normally require _irqsave/restore |
| * but in this case, we are only testing for the DELALLOC |
| * bit, which is only set or cleared with irqs on |
| */ |
| if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| bool do_list = !btrfs_is_free_space_inode(inode); |
| |
| if (*bits & EXTENT_FIRST_DELALLOC) { |
| *bits &= ~EXTENT_FIRST_DELALLOC; |
| } else if (!(*bits & EXTENT_DO_ACCOUNTING)) { |
| spin_lock(&BTRFS_I(inode)->lock); |
| BTRFS_I(inode)->outstanding_extents -= num_extents; |
| spin_unlock(&BTRFS_I(inode)->lock); |
| } |
| |
| /* |
| * We don't reserve metadata space for space cache inodes so we |
| * don't need to call btrfs_delalloc_release_metadata if there is an |
| * error. |
| */ |
| if (*bits & EXTENT_DO_ACCOUNTING && |
| root != root->fs_info->tree_root) |
| btrfs_delalloc_release_metadata(inode, len); |
| |
| /* For sanity tests. */ |
| if (btrfs_test_is_dummy_root(root)) |
| return; |
| |
| if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID |
| && do_list && !(state->state & EXTENT_NORESERVE)) |
| btrfs_free_reserved_data_space_noquota(inode, |
| state->start, len); |
| |
| __percpu_counter_add(&root->fs_info->delalloc_bytes, -len, |
| root->fs_info->delalloc_batch); |
| spin_lock(&BTRFS_I(inode)->lock); |
| BTRFS_I(inode)->delalloc_bytes -= len; |
| if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 && |
| test_bit(BTRFS_INODE_IN_DELALLOC_LIST, |
| &BTRFS_I(inode)->runtime_flags)) |
| btrfs_del_delalloc_inode(root, inode); |
| spin_unlock(&BTRFS_I(inode)->lock); |
| } |
| } |
| |
| /* |
| * extent_io.c merge_bio_hook, this must check the chunk tree to make sure |
| * we don't create bios that span stripes or chunks |
| */ |
| int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset, |
| size_t size, struct bio *bio, |
| unsigned long bio_flags) |
| { |
| struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; |
| u64 logical = (u64)bio->bi_iter.bi_sector << 9; |
| u64 length = 0; |
| u64 map_length; |
| int ret; |
| |
| if (bio_flags & EXTENT_BIO_COMPRESSED) |
| return 0; |
| |
| length = bio->bi_iter.bi_size; |
| map_length = length; |
| ret = btrfs_map_block(root->fs_info, rw, logical, |
| &map_length, NULL, 0); |
| /* Will always return 0 with map_multi == NULL */ |
| BUG_ON(ret < 0); |
| if (map_length < length + size) |
| return 1; |
| return 0; |
| } |
| |
| /* |
| * in order to insert checksums into the metadata in large chunks, |
| * we wait until bio submission time. All the pages in the bio are |
| * checksummed and sums are attached onto the ordered extent record. |
| * |
| * At IO completion time the csums attached to the ordered extent record |
| * are inserted into the btree |
| */ |
| static int __btrfs_submit_bio_start(struct inode *inode, int rw, |
| struct bio *bio, int mirror_num, |
| unsigned long bio_flags, |
| u64 bio_offset) |
| { |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| int ret = 0; |
| |
| ret = btrfs_csum_one_bio(root, inode, bio, 0, 0); |
| BUG_ON(ret); /* -ENOMEM */ |
| return 0; |
| } |
| |
| /* |
| * in order to insert checksums into the metadata in large chunks, |
| * we wait until bio submission time. All the pages in the bio are |
| * checksummed and sums are attached onto the ordered extent record. |
| * |
| * At IO completion time the csums attached to the ordered extent record |
| * are inserted into the btree. The checksumming itself was done by |
| * __btrfs_submit_bio_start; here we just map the bio to the device. |
| */ |
| static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio, |
| int mirror_num, unsigned long bio_flags, |
| u64 bio_offset) |
| { |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| int ret; |
| |
| ret = btrfs_map_bio(root, rw, bio, mirror_num, 1); |
| if (ret) { |
| bio->bi_error = ret; |
| bio_endio(bio); |
| } |
| return ret; |
| } |
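| |
| /* |
| * The two helpers above form the start/done pair handed to the async |
| * submission machinery.  A rough sketch of the call shape (the real call |
| * is in btrfs_submit_bio_hook() below): |
| * |
| * btrfs_wq_submit_bio(fs_info, inode, rw, bio, mirror_num, |
| * bio_flags, bio_offset, |
| * __btrfs_submit_bio_start, (csum phase) |
| * __btrfs_submit_bio_done); (map and submit phase) |
| */ |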
| |
| /* |
| * extent_io.c submission hook. This does the right thing for csum calculation |
| * on write, or reading the csums from the tree before a read |
| */ |
| static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, |
| int mirror_num, unsigned long bio_flags, |
| u64 bio_offset) |
| { |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA; |
| int ret = 0; |
| int skip_sum; |
| int async = !atomic_read(&BTRFS_I(inode)->sync_writers); |
| |
| skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; |
| |
| if (btrfs_is_free_space_inode(inode)) |
| metadata = BTRFS_WQ_ENDIO_FREE_SPACE; |
| |
| if (!(rw & REQ_WRITE)) { |
| ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata); |
| if (ret) |
| goto out; |
| |
| if (bio_flags & EXTENT_BIO_COMPRESSED) { |
| ret = btrfs_submit_compressed_read(inode, bio, |
| mirror_num, |
| bio_flags); |
| goto out; |
| } else if (!skip_sum) { |
| ret = btrfs_lookup_bio_sums(root, inode, bio, NULL); |
| if (ret) |
| goto out; |
| } |
| goto mapit; |
| } else if (async && !skip_sum) { |
| /* csum items have already been cloned */ |
| if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) |
| goto mapit; |
| /* we're doing a write, do the async checksumming */ |
| ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, |
| inode, rw, bio, mirror_num, |
| bio_flags, bio_offset, |
| __btrfs_submit_bio_start, |
| __btrfs_submit_bio_done); |
| goto out; |
| } else if (!skip_sum) { |
| ret = btrfs_csum_one_bio(root, inode, bio, 0, 0); |
| if (ret) |
| goto out; |
| } |
| |
| mapit: |
| ret = btrfs_map_bio(root, rw, bio, mirror_num, 0); |
| |
| out: |
| if (ret < 0) { |
| bio->bi_error = ret; |
| bio_endio(bio); |
| } |
| return ret; |
| } |
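| |
| /* |
| * Rough decision table for the hook above (a summary of the existing |
| * code, not additional behaviour): |
| * |
| * READ: queue the end_io work, then |
| * - compressed -> btrfs_submit_compressed_read() |
| * - csums enabled -> btrfs_lookup_bio_sums(), then map the bio |
| * - nodatasum -> map the bio directly |
| * WRITE: |
| * - async, csums enabled -> btrfs_wq_submit_bio() (csum in start hook), |
| * except for the data reloc tree, whose csum items are already |
| * cloned, so its bios are mapped directly |
| * - sync, csums enabled -> btrfs_csum_one_bio(), then map the bio |
| * - nodatasum -> map the bio directly |
| */ |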
| |
| /* |
| * given a list of ordered sums, record them in the inode. This happens |
| * at IO completion time based on sums calculated at bio submission time. |
| */ |
| static noinline int add_pending_csums(struct btrfs_trans_handle *trans, |
| struct inode *inode, u64 file_offset, |
| struct list_head *list) |
| { |
| struct btrfs_ordered_sum *sum; |
| |
| list_for_each_entry(sum, list, list) { |
| trans->adding_csums = 1; |
| btrfs_csum_file_blocks(trans, |
| BTRFS_I(inode)->root->fs_info->csum_root, sum); |
| trans->adding_csums = 0; |
| } |
| return 0; |
| } |
| |
| int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, |
| struct extent_state **cached_state) |
| { |
| WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0); |
| return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, |
| cached_state, GFP_NOFS); |
| } |
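| |
| /* |
| * Typical call pattern, a condensed sketch of what the writepage fixup |
| * worker below does (error handling trimmed): |
| * |
| * lock_extent_bits(&BTRFS_I(inode)->io_tree, start, end, 0, &cached); |
| * btrfs_delalloc_reserve_space(inode, start, len); |
| * btrfs_set_extent_delalloc(inode, start, end, &cached); |
| * set_page_dirty(page); |
| * unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, end, |
| * &cached, GFP_NOFS); |
| */ |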
| |
| /* see btrfs_writepage_start_hook for details on why this is required */ |
| struct btrfs_writepage_fixup { |
| struct page *page; |
| struct btrfs_work work; |
| }; |
| |
| static void btrfs_writepage_fixup_worker(struct btrfs_work *work) |
| { |
| struct btrfs_writepage_fixup *fixup; |
| struct btrfs_ordered_extent *ordered; |
| struct extent_state *cached_state = NULL; |
| struct page *page; |
| struct inode *inode; |
| u64 page_start; |
| u64 page_end; |
| int ret; |
| |
| fixup = container_of(work, struct btrfs_writepage_fixup, work); |
| page = fixup->page; |
| again: |
| lock_page(page); |
| if (!page->mapping || !PageDirty(page) || !PageChecked(page)) { |
| ClearPageChecked(page); |
| goto out_page; |
| } |
| |
| inode = page->mapping->host; |
| page_start = page_offset(page); |
| page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; |
| |
| lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0, |
| &cached_state); |
| |
| /* already ordered? We're done */ |
| if (PagePrivate2(page)) |
| goto out; |
| |
| ordered = btrfs_lookup_ordered_extent(inode, page_start); |
| if (ordered) { |
| unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, |
| page_end, &cached_state, GFP_NOFS); |
| unlock_page(page); |
| btrfs_start_ordered_extent(inode, ordered, 1); |
| btrfs_put_ordered_extent(ordered); |
| goto again; |
| } |
| |
| ret = btrfs_delalloc_reserve_space(inode, page_start, |
| PAGE_CACHE_SIZE); |
| if (ret) { |
| mapping_set_error(page->mapping, ret); |
| end_extent_writepage(page, ret, page_start, page_end); |
| ClearPageChecked(page); |
| goto out; |
| } |
| |
| btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); |
| ClearPageChecked(page); |
| set_page_dirty(page); |
| out: |
| unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, |
| &cached_state, GFP_NOFS); |
| out_page: |
| unlock_page(page); |
| page_cache_release(page); |
| kfree(fixup); |
| } |
| |
| /* |
| * There are a few paths in the higher layers of the kernel that directly |
| * set the page dirty bit without asking the filesystem if it is a |
| * good idea. This causes problems because we want to make sure COW |
| * properly happens and the data=ordered rules are followed. |
| * |
| * In our case any range that doesn't have the ORDERED bit set |
| * hasn't been properly set up for IO. We kick off an async process |
| * to fix it up. The async helper will wait for ordered extents, set |
| * the delalloc bit and make it safe to write the page. |
| */ |
| static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) |
| { |
| struct inode *inode = page->mapping->host; |
| struct btrfs_writepage_fixup *fixup; |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| |
| /* this page is properly in the ordered list */ |
| if (TestClearPagePrivate2(page)) |
| return 0; |
| |
| if (PageChecked(page)) |
| return -EAGAIN; |
| |
| fixup = kzalloc(sizeof(*fixup), GFP_NOFS); |
| if (!fixup) |
| return -EAGAIN; |
| |
| SetPageChecked(page); |
| page_cache_get(page); |
| btrfs_init_work(&fixup->work, btrfs_fixup_helper, |
| btrfs_writepage_fixup_worker, NULL, NULL); |
| fixup->page = page; |
| btrfs_queue_work(root->fs_info->fixup_workers, &fixup->work); |
| return -EBUSY; |
| } |
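| |
| /* |
| * Return values of the hook above, roughly as the writepage path in |
| * extent_io.c interprets them (a summary of existing behaviour): |
| * |
| * 0 the page already had Private2 set, i.e. it is covered by an |
| * ordered extent and writeback can proceed |
| * -EAGAIN a fixup is already pending (or allocating one failed); the |
| * page is left dirty and skipped for now |
| * -EBUSY a fixup worker was queued; it will reserve space, set the |
| * delalloc bit and redirty the page before it is written |
| */ |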
| |
| static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, |
| struct inode *inode, u64 file_pos, |
| u64 disk_bytenr, u64 disk_num_bytes, |
| u64 num_bytes, u64 ram_bytes, |
| u8 compression, u8 encryption, |
| u16 other_encoding, int extent_type) |
| { |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| struct btrfs_file_extent_item *fi; |
| struct btrfs_path *path; |
| struct extent_buffer *leaf; |
| struct btrfs_key ins; |
| int extent_inserted = 0; |
| int ret; |
| |
| path = btrfs_alloc_path(); |
| if (!path) |
| return -ENOMEM; |
| |
| /* |
| * we may be replacing one extent in the tree with another. |
| * The new extent is pinned in the extent map, and we don't want |
| * to drop it from the cache until it is completely in the btree. |
| * |
| * So, tell btrfs_drop_extents to leave this extent in the cache. |
| * The caller is expected to unpin it and allow it to be merged |
| * with the others. |
| */ |
| ret = __btrfs_drop_extents(trans, root, inode, path, file_pos, |
| file_pos + num_bytes, NULL, 0, |
| 1, sizeof(*fi), &extent_inserted); |
| if (ret) |
| goto out; |
| |
| if (!extent_inserted) { |
| ins.objectid = btrfs_ino(inode); |
| ins.offset = file_pos; |
| ins.type = BTRFS_EXTENT_DATA_KEY; |
| |
| path->leave_spinning = 1; |
| ret = btrfs_insert_empty_item(trans, root, path, &ins, |
| sizeof(*fi)); |
| if (ret) |
| goto out; |
| } |
| leaf = path->nodes[0]; |
| fi = btrfs_item_ptr(leaf, path->slots[0], |
| struct btrfs_file_extent_item); |
| btrfs_set_file_extent_generation(leaf, fi, trans->transid); |
| btrfs_set_file_extent_type(leaf, fi, extent_type); |
| btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr); |
| btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes); |
| btrfs_set_file_extent_offset(leaf, fi, 0); |
| btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes); |
| btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes); |
| btrfs_set_file_extent_compression(leaf, fi, compression); |
| btrfs_set_file_extent_encryption(leaf, fi, encryption); |
| btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding); |
| |
| btrfs_mark_buffer_dirty(leaf); |
| btrfs_release_path(path); |
| |
| inode_add_bytes(inode, num_bytes); |
| |
| ins.objectid = disk_bytenr; |
| ins.offset = disk_num_bytes; |
| ins.type = BTRFS_EXTENT_ITEM_KEY; |
| ret = btrfs_alloc_reserved_file_extent(trans, root, |
| root->root_key.objectid, |
| btrfs_ino(inode), file_pos, |
| ram_bytes, &ins); |
| /* |
| * Release the reserved range from the inode's dirty range map, as it |
| * has already been moved into the delayed_ref_head. |
| */ |
| btrfs_qgroup_release_data(inode, file_pos, ram_bytes); |
| out: |
| btrfs_free_path(path); |
| |
| return ret; |
| } |
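| |
| /* |
| * Illustrative example (made-up sizes) of the fields written above for a |
| * compressed extent: a 128K range of the file compressed down to 16K on |
| * disk would be inserted with |
| * |
| * num_bytes = 128 * 1024 (bytes covered in the file) |
| * ram_bytes = 128 * 1024 (uncompressed size) |
| * disk_num_bytes = 16 * 1024 (bytes allocated on disk) |
| * compression = BTRFS_COMPRESS_ZLIB (for example) |
| * extent_type = BTRFS_FILE_EXTENT_REG |
| * |
| * For uncompressed extents the three sizes are equal and compression |
| * is 0. |
| */ |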
| |
| /* snapshot-aware defrag */ |
| struct sa_defrag_extent_backref { |
| struct rb_node node; |
| struct old_sa_defrag_extent *old; |
| u64 root_id; |
| u64 inum; |
| u64 file_pos; |
| u64 extent_offset; |
| u64 num_bytes; |
| u64 generation; |
| }; |
| |
| struct old_sa_defrag_extent { |
| struct list_head list; |
| struct new_sa_defrag_extent *new; |
| |
| u64 extent_offset; |
| u64 bytenr; |
| u64 offset; |
| u64 len; |
| int count; |
| }; |
| |
| struct new_sa_defrag_extent { |
| struct rb_root root; |
| struct list_head head; |
| struct btrfs_path *path; |
| struct inode *inode; |
| u64 file_pos; |
| u64 len; |
| u64 bytenr; |
| u64 disk_len; |
| u8 compress_type; |
| }; |
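| |
| /* |
| * Rough picture of how the three structures above relate (filled in by |
| * record_old_file_extents() and record_one_backref() below): |
| * |
| * new_sa_defrag_extent one per defragged ordered extent |
| * ->head list of old_sa_defrag_extent, one per |
| * pre-defrag file extent overlapping the range |
| * ->root rb tree of sa_defrag_extent_backref, the |
| * (root, inode, offset) references found for |
| * each old extent |
| */ |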
| |
| static int backref_comp(struct sa_defrag_extent_backref *b1, |
| struct sa_defrag_extent_backref *b2) |
| { |
| if (b1->root_id < b2->root_id) |
| return -1; |
| else if (b1->root_id > b2->root_id) |
| return 1; |
| |
| if (b1->inum < b2->inum) |
| return -1; |
| else if (b1->inum > b2->inum) |
| return 1; |
| |
| if (b1->file_pos < b2->file_pos) |
| return -1; |
| else if (b1->file_pos > b2->file_pos) |
| return 1; |
| |
| /* |
| * [------------------------------] ===> (a range of space) |
| * |<--->| |<---->| =============> (fs/file tree A) |
| * |<---------------------------->| ===> (fs/file tree B) |
| * |
| * A range of space can be referred to by two file extents in one tree |
| * while being referred to by only one file extent in another tree. |
| * |
| * So we may process a disk offset more than once (two extents in A), |
| * land on the same extent (one extent in B), and end up inserting two |
| * identical backrefs (both referring to the extent in B). |
| */ |
| return 0; |
| } |
| |
| static void backref_insert(struct rb_root *root, |
| struct sa_defrag_extent_backref *backref) |
| { |
| struct rb_node **p = &root->rb_node; |
| struct rb_node *parent = NULL; |
| struct sa_defrag_extent_backref *entry; |
| int ret; |
| |
| while (*p) { |
| parent = *p; |
| entry = rb_entry(parent, struct sa_defrag_extent_backref, node); |
| |
| ret = backref_comp(backref, entry); |
| if (ret < 0) |
| p = &(*p)->rb_left; |
| else |
| p = &(*p)->rb_right; |
| } |
| |
| rb_link_node(&backref->node, parent, p); |
| rb_insert_color(&backref->node, root); |
| } |
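| |
| /* |
| * Note that backref_comp() can return 0 for two distinct backrefs (the |
| * tree A / tree B situation described above).  Equal keys are sent to |
| * the right child here, so such duplicates simply coexist in the rb tree |
| * and are handled one after another by relink_file_extents(). |
| */ |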
| |
| /* |
| * Note that the backref might have changed; in that case we just return 0. |
| */ |
| static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id, |
| void *ctx) |
| { |
| struct btrfs_file_extent_item *extent; |
| struct btrfs_fs_info *fs_info; |
| struct old_sa_defrag_extent *old = ctx; |
| struct new_sa_defrag_extent *new = old->new; |
| struct btrfs_path *path = new->path; |
| struct btrfs_key key; |
| struct btrfs_root *root; |
| struct sa_defrag_extent_backref *backref; |
| struct extent_buffer *leaf; |
| struct inode *inode = new->inode; |
| int slot; |
| int ret; |
| u64 extent_offset; |
| u64 num_bytes; |
| |
| if (BTRFS_I(inode)->root->root_key.objectid == root_id && |
| inum == btrfs_ino(inode)) |
| return 0; |
| |
| key.objectid = root_id; |
| key.type = BTRFS_ROOT_ITEM_KEY; |
| key.offset = (u64)-1; |
| |
| fs_info = BTRFS_I(inode)->root->fs_info; |
| root = btrfs_read_fs_root_no_name(fs_info, &key); |
| if (IS_ERR(root)) { |
| if (PTR_ERR(root) == -ENOENT) |
| return 0; |
| WARN_ON(1); |
| pr_debug("inum=%llu, offset=%llu, root_id=%llu\n", |
| inum, offset, root_id); |
| return PTR_ERR(root); |
| } |
| |
| key.objectid = inum; |
| key.type = BTRFS_EXTENT_DATA_KEY; |
| if (offset > (u64)-1 << 32) |
| key.offset = 0; |
| else |
| key.offset = offset; |
| |
| ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
| if (WARN_ON(ret < 0)) |
| return ret; |
| ret = 0; |
| |
| while (1) { |
| cond_resched(); |
| |
| leaf = path->nodes[0]; |
| slot = path->slots[0]; |
| |
| if (slot >= btrfs_header_nritems(leaf)) { |
| ret = btrfs_next_leaf(root, path); |
| if (ret < 0) { |
| goto out; |
| } else if (ret > 0) { |
| ret = 0; |
| goto out; |
| } |
| continue; |
| } |
| |
| path->slots[0]++; |
| |
| btrfs_item_key_to_cpu(leaf, &key, slot); |
| |
| if (key.objectid > inum) |
| goto out; |
| |
| if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY) |
| continue; |
| |
| extent = btrfs_item_ptr(leaf, slot, |
| struct btrfs_file_extent_item); |
| |
| if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr) |
| continue; |
| |
| /* |
| * 'offset' refers to the exact key.offset, |
| * NOT the 'offset' field in btrfs_extent_data_ref, i.e. |
| * (key.offset - extent_offset). |
| */ |
| if (key.offset != offset) |
| continue; |
| |
| extent_offset = btrfs_file_extent_offset(leaf, extent); |
| num_bytes = btrfs_file_extent_num_bytes(leaf, extent); |
| |
| if (extent_offset >= old->extent_offset + old->offset + |
| old->len || extent_offset + num_bytes <= |
| old->extent_offset + old->offset) |
| continue; |
| break; |
| } |
| |
| backref = kmalloc(sizeof(*backref), GFP_NOFS); |
| if (!backref) { |
| ret = -ENOENT; |
| goto out; |
| } |
| |
| backref->root_id = root_id; |
| backref->inum = inum; |
| backref->file_pos = offset; |
| backref->num_bytes = num_bytes; |
| backref->extent_offset = extent_offset; |
| backref->generation = btrfs_file_extent_generation(leaf, extent); |
| backref->old = old; |
| backref_insert(&new->root, backref); |
| old->count++; |
| out: |
| btrfs_release_path(path); |
| WARN_ON(ret); |
| return ret; |
| } |
| |
| static noinline bool record_extent_backrefs(struct btrfs_path *path, |
| struct new_sa_defrag_extent *new) |
| { |
| struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info; |
| struct old_sa_defrag_extent *old, *tmp; |
| int ret; |
| |
| new->path = path; |
| |
| list_for_each_entry_safe(old, tmp, &new->head, list) { |
| ret = iterate_inodes_from_logical(old->bytenr + |
| old->extent_offset, fs_info, |
| path, record_one_backref, |
| old); |
| if (ret < 0 && ret != -ENOENT) |
| return false; |
| |
| /* no backref to be processed for this extent */ |
| if (!old->count) { |
| list_del(&old->list); |
| kfree(old); |
| } |
| } |
| |
| if (list_empty(&new->head)) |
| return false; |
| |
| return true; |
| } |
| |
| static int relink_is_mergable(struct extent_buffer *leaf, |
| struct btrfs_file_extent_item *fi, |
| struct new_sa_defrag_extent *new) |
| { |
| if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr) |
| return 0; |
| |
| if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG) |
| return 0; |
| |
| if (btrfs_file_extent_compression(leaf, fi) != new->compress_type) |
| return 0; |
| |
| if (btrfs_file_extent_encryption(leaf, fi) || |
| btrfs_file_extent_other_encoding(leaf, fi)) |
| return 0; |
| |
| return 1; |
| } |
| |
| /* |
| * Note that the backref might have changed; in that case we just return 0. |
| */ |
| static noinline int relink_extent_backref(struct btrfs_path *path, |
| struct sa_defrag_extent_backref *prev, |
| struct sa_defrag_extent_backref *backref) |
| { |
| struct btrfs_file_extent_item *extent; |
| struct btrfs_file_extent_item *item; |
| struct btrfs_ordered_extent *ordered; |
| struct btrfs_trans_handle *trans; |
| struct btrfs_fs_info *fs_info; |
| struct btrfs_root *root; |
| struct btrfs_key key; |
| struct extent_buffer *leaf; |
| struct old_sa_defrag_extent *old = backref->old; |
| struct new_sa_defrag_extent *new = old->new; |
| struct inode *src_inode = new->inode; |
| struct inode *inode; |
| struct extent_state *cached = NULL; |
| int ret = 0; |
| u64 start; |
| u64 len; |
| u64 lock_start; |
| u64 lock_end; |
| bool merge = false; |
| int index; |
| |
| if (prev && prev->root_id == backref->root_id && |
| prev->inum == backref->inum && |
| prev->file_pos + prev->num_bytes == backref->file_pos) |
| merge = true; |
| |
| /* step 1: get root */ |
| key.objectid = backref->root_id; |
| key.type = BTRFS_ROOT_ITEM_KEY; |
| key.offset = (u64)-1; |
| |
| fs_info = BTRFS_I(src_inode)->root->fs_info; |
| index = srcu_read_lock(&fs_info->subvol_srcu); |
| |
| root = btrfs_read_fs_root_no_name(fs_info, &key); |
| if (IS_ERR(root)) { |
| srcu_read_unlock(&fs_info->subvol_srcu, index); |
| if (PTR_ERR(root) == -ENOENT) |
| return 0; |
| return PTR_ERR(root); |
| } |
| |
| if (btrfs_root_readonly(root)) { |
| srcu_read_unlock(&fs_info->subvol_srcu, index); |
| return 0; |
| } |
| |
| /* step 2: get inode */ |
| key.objectid = backref->inum; |
| key.type = BTRFS_INODE_ITEM_KEY; |
| key.offset = 0; |
| |
| inode = btrfs_iget(fs_info->sb, &key, root, NULL); |
| if (IS_ERR(inode)) { |
| srcu_read_unlock(&fs_info->subvol_srcu, index); |
| return 0; |
| } |
| |
| srcu_read_unlock(&fs_info->subvol_srcu, index); |
| |
| /* step 3: relink backref */ |
| lock_start = backref->file_pos; |
| lock_end = backref->file_pos + backref->num_bytes - 1; |
| lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end, |
| 0, &cached); |
| |
| ordered = btrfs_lookup_first_ordered_extent(inode, lock_end); |
| if (ordered) { |
| btrfs_put_ordered_extent(ordered); |
| goto out_unlock; |
| } |
| |
| trans = btrfs_join_transaction(root); |
| if (IS_ERR(trans)) { |
| ret = PTR_ERR(trans); |
| goto out_unlock; |
| } |
| |
| key.objectid = backref->inum; |
| key.type = BTRFS_EXTENT_DATA_KEY; |
| key.offset = backref->file_pos; |
| |
| ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
| if (ret < 0) { |
| goto out_free_path; |
| } else if (ret > 0) { |
| ret = 0; |
| goto out_free_path; |
| } |
| |
| extent = btrfs_item_ptr(path->nodes[0], path->slots[0], |
| struct btrfs_file_extent_item); |
| |
| if (btrfs_file_extent_generation(path->nodes[0], extent) != |
| backref->generation) |
| goto out_free_path; |
| |
| btrfs_release_path(path); |
| |
| start = backref->file_pos; |
| if (backref->extent_offset < old->extent_offset + old->offset) |
| start += old->extent_offset + old->offset - |
| backref->extent_offset; |
| |
| len = min(backref->extent_offset + backref->num_bytes, |
| old->extent_offset + old->offset + old->len); |
| len -= max(backref->extent_offset, old->extent_offset + old->offset); |
| |
| ret = btrfs_drop_extents(trans, root, inode, start, |
| start + len, 1); |
| if (ret) |
| goto out_free_path; |
| again: |
| key.objectid = btrfs_ino(inode); |
| key.type = BTRFS_EXTENT_DATA_KEY; |
| key.offset = start; |
| |
| path->leave_spinning = 1; |
| if (merge) { |
| struct btrfs_file_extent_item *fi; |
| u64 extent_len; |
| struct btrfs_key found_key; |
| |
| ret = btrfs_search_slot(trans, root, &key, path, 0, 1); |
| if (ret < 0) |
| goto out_free_path; |
| |
| path->slots[0]--; |
| leaf = path->nodes[0]; |
| btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
| |
| fi = btrfs_item_ptr(leaf, path->slots[0], |
| struct btrfs_file_extent_item); |
| extent_len = btrfs_file_extent_num_bytes(leaf, fi); |
| |
| if (extent_len + found_key.offset == start && |
| relink_is_mergable(leaf, fi, new)) { |
| btrfs_set_file_extent_num_bytes(leaf, fi, |
| extent_len + len); |
| btrfs_mark_buffer_dirty(leaf); |
| inode_add_bytes(inode, len); |
| |
| ret = 1; |
| goto out_free_path; |
| } else { |
| merge = false; |
| btrfs_release_path(path); |
| goto again; |
| } |
| } |
| |
| ret = btrfs_insert_empty_item(trans, root, path, &key, |
| sizeof(*extent)); |
| if (ret) { |
| btrfs_abort_transaction(trans, root, ret); |
| goto out_free_path; |
| } |
| |
| leaf = path->nodes[0]; |
| item = btrfs_item_ptr(leaf, path->slots[0], |
| struct btrfs_file_extent_item); |
| btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr); |
| btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len); |
| btrfs_set_file_extent_offset(leaf, item, start - new->file_pos); |
| btrfs_set_file_extent_num_bytes(leaf, item, len); |
| btrfs_set_file_extent_ram_bytes(leaf, item, new->len); |
| btrfs_set_file_extent_generation(leaf, item, trans->transid); |
| btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG); |
| btrfs_set_file_extent_compression(leaf, item, new->compress_type); |
| btrfs_set_file_extent_encryption(leaf, item, 0); |
| btrfs_set_file_extent_other_encoding(leaf, item, 0); |
| |
| btrfs_mark_buffer_dirty(leaf); |
| inode_add_bytes(inode, len); |
| btrfs_release_path(path); |
| |
| ret = btrfs_inc_extent_ref(trans, root, new->bytenr, |
| new->disk_len, 0, |
| backref->root_id, backref->inum, |
| new->file_pos); /* start - extent_offset */ |
| if (ret) { |
| btrfs_abort_transaction(trans, root, ret); |
| goto out_free_path; |
| } |
| |
| ret = 1; |
| out_free_path: |
| btrfs_release_path(path); |
| path->leave_spinning = 0; |
| btrfs_end_transaction(trans, root); |
| out_unlock: |
| unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end, |
| &cached, GFP_NOFS); |
| iput(inode); |
| return ret; |
| } |
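| |
| /* |
| * Return convention of relink_extent_backref(), as consumed by the loop |
| * in relink_file_extents() below: |
| * |
| * 1 the backref was relinked (or merged); it is kept as "prev" so |
| * the next contiguous backref can be merged into it |
| * 0 the backref was stale (root, inode or extent changed) and was |
| * skipped |
| * <0 an error occurred; the caller only warns and carries on |
| */ |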
| |
| static void free_sa_defrag_extent(struct new_sa_defrag_extent *new) |
| { |
| struct old_sa_defrag_extent *old, *tmp; |
| |
| if (!new) |
| return; |
| |
| list_for_each_entry_safe(old, tmp, &new->head, list) { |
| kfree(old); |
| } |
| kfree(new); |
| } |
| |
| static void relink_file_extents(struct new_sa_defrag_extent *new) |
| { |
| struct btrfs_path *path; |
| struct sa_defrag_extent_backref *backref; |
| struct sa_defrag_extent_backref *prev = NULL; |
| struct inode *inode; |
| struct btrfs_root *root; |
| struct rb_node *node; |
| int ret; |
| |
| inode = new->inode; |
| root = BTRFS_I(inode)->root; |
| |
| path = btrfs_alloc_path(); |
| if (!path) |
| return; |
| |
| if (!record_extent_backrefs(path, new)) { |
| btrfs_free_path(path); |
| goto out; |
| } |
| btrfs_release_path(path); |
| |
| while (1) { |
| node = rb_first(&new->root); |
| if (!node) |
| break; |
| rb_erase(node, &new->root); |
| |
| backref = rb_entry(node, struct sa_defrag_extent_backref, node); |
| |
| ret = relink_extent_backref(path, prev, backref); |
| WARN_ON(ret < 0); |
| |
| kfree(prev); |
| |
| if (ret == 1) |
| prev = backref; |
| else |
| prev = NULL; |
| cond_resched(); |
| } |
| kfree(prev); |
| |
| btrfs_free_path(path); |
| out: |
| free_sa_defrag_extent(new); |
| |
| atomic_dec(&root->fs_info->defrag_running); |
| wake_up(&root->fs_info->transaction_wait); |
| } |
| |
| static struct new_sa_defrag_extent * |
| record_old_file_extents(struct inode *inode, |
| struct btrfs_ordered_extent *ordered) |
| { |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| struct btrfs_path *path; |
| struct btrfs_key key; |
| struct old_sa_defrag_extent *old; |
| struct new_sa_defrag_extent *new; |
| int ret; |
| |
| new = kmalloc(sizeof(*new), GFP_NOFS); |
| if (!new) |
| return NULL; |
| |
| new->inode = inode; |
| new->file_pos = ordered->file_offset; |
| new->len = ordered->len; |
| new->bytenr = ordered->start; |
| new->disk_len = ordered->disk_len; |
| new->compress_type = ordered->compress_type; |
| new->root = RB_ROOT; |
| INIT_LIST_HEAD(&new->head); |
| |
| path = btrfs_alloc_path(); |
| if (!path) |
| goto out_kfree; |
| |
| key.objectid = btrfs_ino(inode); |
| key.type = BTRFS_EXTENT_DATA_KEY; |
| key.offset = new->file_pos; |
| |
| ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
| if (ret < 0) |
| goto out_free_path; |
| if (ret > 0 && path->slots[0] > 0) |
| path->slots[0]--; |
| |
| /* find out all the old extents for the file range */ |
| while (1) { |
| struct btrfs_file_extent_item *extent; |
| struct extent_buffer *l; |
| int slot; |
| u64 num_bytes; |
| u64 offset; |
| u64 end; |
| u64 disk_bytenr; |
| u64 extent_offset; |
| |
| l = path->nodes[0]; |
| slot = path->slots[0]; |
| |
| if (slot >= btrfs_header_nritems(l)) { |
| ret = btrfs_next_leaf(root, path); |
| if (ret < 0) |
| goto out_free_path; |
| else if (ret > 0) |
| break; |
| continue; |
| } |
| |
| btrfs_item_key_to_cpu(l, &key, slot); |
| |
| if (key.objectid != btrfs_ino(inode)) |
| break; |
| if (key.type != BTRFS_EXTENT_DATA_KEY) |
| break; |
| if (key.offset >= new->file_pos + new->len) |
| break; |
| |
| extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item); |
| |
| num_bytes = btrfs_file_extent_num_bytes(l, extent); |
| if (key.offset + num_bytes < new->file_pos) |
| goto next; |
| |
| disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent); |
| if (!disk_bytenr) |
| goto next; |
| |
| extent_offset = btrfs_file_extent_offset(l, extent); |
| |
| old = kmalloc(sizeof(*old), GFP_NOFS); |
| if (!old) |
| goto out_free_path; |
| |
| offset = max(new->file_pos, key.offset); |
| end = min(new->file_pos + new->len, key.offset + num_bytes); |
| |
| old->bytenr = disk_bytenr; |
| old->extent_offset = extent_offset; |
| old->offset = offset - key.offset; |
| old->len = end - offset; |
| old->new = new; |
| old->count = 0; |
| list_add_tail(&old->list, &new->head); |
| next: |
| path->slots[0]++; |
| cond_resched(); |
| } |
| |
| btrfs_free_path(path); |
| atomic_inc(&root->fs_info->defrag_running); |
| |
| return new; |
| |
| out_free_path: |
| btrfs_free_path(path); |
| out_kfree: |
| free_sa_defrag_extent(new); |
| return NULL; |
| } |
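| |
| /* |
| * Worked example (made-up offsets) of the overlap bookkeeping in the |
| * loop above: if the defragged range is [128K, 256K) and an old file |
| * extent item at key.offset = 96K covers 64K, then |
| * |
| * offset = max(128K, 96K) = 128K |
| * end = min(256K, 96K + 64K) = 160K |
| * old->offset = offset - key.offset = 32K |
| * old->len = end - offset = 32K |
| * |
| * so only the 32K of the old extent that overlaps the defragged range |
| * gets recorded. |
| */ |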
| |
| static void btrfs_release_delalloc_bytes(struct btrfs_root *root, |
| u64 start, u64 len) |
| { |
| struct btrfs_block_group_cache *cache; |
| |
| cache = btrfs_lookup_block_group(root->fs_info, start); |
| ASSERT(cache); |
| |
| spin_lock(&cache->lock); |
| cache->delalloc_bytes -= len; |
| spin_unlock(&cache->lock); |
| |
| btrfs_put_block_group(cache); |
| } |
| |
| /* as ordered data IO finishes, this gets called so we can finish |
| * an ordered extent if the range of bytes in the file it covers is |
| * fully written. |
| */ |
| static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) |
| { |
| struct inode *inode = ordered_extent->inode; |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| struct btrfs_trans_handle *trans = NULL; |
| struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
| struct extent_state *cached_state = NULL; |
| struct new_sa_defrag_extent *new = NULL; |
| int compress_type = 0; |
| int ret = 0; |
| u64 logical_len = ordered_extent->len; |
| bool nolock; |
| bool truncated = false; |
| |
| nolock = btrfs_is_free_space_inode(inode); |
| |
| if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { |
| ret = -EIO; |
| goto out; |
| } |
| |
| btrfs_free_io_failure_record(inode, ordered_extent->file_offset, |
| ordered_extent->file_offset + |
| ordered_extent->len - 1); |
| |
| if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { |
| truncated = true; |
| logical_len = ordered_extent->truncated_len; |
| /* Truncated the entire extent, don't bother adding */ |
| if (!logical_len) |
| goto out; |
| } |
| |
| if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { |
| BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */ |
| |
| /* |
| * For the mwrite (mmap + memset to write) case, we still reserve |
| * space for the NOCOW range. |
| * As NOCOW won't create a new delayed ref, just free the space. |
| */ |
| btrfs_qgroup_free_data(inode, ordered_extent->file_offset, |
| ordered_extent->len); |
| btrfs_ordered_update_i_size(inode, 0, ordered_extent); |
| if (nolock) |
| trans = btrfs_join_transaction_nolock(root); |
| else |
| trans = btrfs_join_transaction(root); |
| if (IS_ERR(trans)) { |
| ret = PTR_ERR(trans); |
| trans = NULL; |
| goto out; |
| } |
| trans->block_rsv = &root->fs_info->delalloc_block_rsv; |
| ret = btrfs_update_inode_fallback(trans, root, inode); |
| if (ret) /* -ENOMEM or corruption */ |
| btrfs_abort_transaction(trans, root, ret); |
| goto out; |
| } |
| |
| lock_extent_bits(io_tree, ordered_extent->file_offset, |
| ordered_extent->file_offset + ordered_extent->len - 1, |
| 0, &cached_state); |
| |
| ret = test_range_bit(io_tree, ordered_extent->file_offset, |
| ordered_extent->file_offset + ordered_extent->len - 1, |
| EXTENT_DEFRAG, 1, cached_state); |
| if (ret) { |
| u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item); |
| /* |
| * Snapshot-aware defrag is currently disabled; the "0 &&" below |
| * keeps the code path around without ever taking it. |
| */ |
| if (0 && last_snapshot >= BTRFS_I(inode)->generation) |
| /* the inode is shared */ |
| new = record_old_file_extents(inode, ordered_extent); |
| |
| clear_extent_bit(io_tree, ordered_extent->file_offset, |
| ordered_extent->file_offset + ordered_extent->len - 1, |
| EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS); |
| } |
| |
| if (nolock) |
| trans = btrfs_join_transaction_nolock(root); |
| else |
| trans = btrfs_join_transaction(root); |
| if (IS_ERR(trans)) { |
| ret = PTR_ERR(trans); |
| trans = NULL; |
| goto out_unlock; |
| } |
| |
| trans->block_rsv = &root->fs_info->delalloc_block_rsv; |
| |
| if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) |
| compress_type = ordered_extent->compress_type; |
| if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { |
| BUG_ON(compress_type); |
| ret = btrfs_mark_extent_written(trans, inode, |
| ordered_extent->file_offset, |
| ordered_extent->file_offset + |
| logical_len); |
| } else { |
| BUG_ON(root == root->fs_info->tree_root); |
| ret = insert_reserved_file_extent(trans, inode, |
| ordered_extent->file_offset, |
| ordered_extent->start, |
| ordered_extent->disk_len, |
| logical_len, logical_len, |
| compress_type, 0, 0, |
| BTRFS_FILE_EXTENT_REG); |
| if (!ret) |
| btrfs_release_delalloc_bytes(root, |
| ordered_extent->start, |
| ordered_extent->disk_len); |
| } |
| unpin_extent_cache(&BTRFS_I(inode)->extent_tree, |
| ordered_extent->file_offset, ordered_extent->len, |
| trans->transid); |
| if (ret < 0) { |
| btrfs_abort_transaction(trans, root, ret); |
| goto out_unlock; |
| } |
| |
| add_pending_csums(trans, inode, ordered_extent->file_offset, |
| &ordered_extent->list); |
| |
| btrfs_ordered_update_i_size(inode, 0, ordered_extent); |
| ret = btrfs_update_inode_fallback(trans, root, inode); |
| if (ret) { /* -ENOMEM or corruption */ |
| btrfs_abort_transaction(trans, root, ret); |
| goto out_unlock; |
| } |
| ret = 0; |
| out_unlock: |
| unlock_extent_cached(io_tree, ordered_extent->file_offset, |
| ordered_extent->file_offset + |
| ordered_extent->len - 1, &cached_state, GFP_NOFS); |
| out: |
| if (root != root->fs_info->tree_root) |
| btrfs_delalloc_release_metadata(inode, ordered_extent->len); |
| if (trans) |
| btrfs_end_transaction(trans, root); |
| |
| if (ret || truncated) { |
| u64 start, end; |
| |
| if (truncated) |
| start = ordered_extent->file_offset + logical_len; |
| else |
| start = ordered_extent->file_offset; |
| end = ordered_extent->file_offset + ordered_extent->len - 1; |
| clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS); |
| |
| /* Drop the cache for the part of the extent we didn't write. */ |
| btrfs_drop_extent_cache(inode, start, end, 0); |
| |
| /* |
| * If the ordered extent had an IOERR or something else went |
| * wrong we need to return the space for this ordered extent |
| * back to the allocator. We only free the extent in the |
| * truncated case if we didn't write out the extent at all. |
| */ |
| if ((ret || !logical_len) && |
| !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && |
| !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) |
| btrfs_free_reserved_extent(root, ordered_extent->start, |
| ordered_extent->disk_len, 1); |
| } |
| |
| /* |
| * This needs to be done to make sure anybody waiting knows we are done |
| * updating everything for this ordered extent. |
| */ |
| btrfs_remove_ordered_extent(inode, ordered_extent); |
| |
| /* for snapshot-aware defrag */ |
| if (new) { |
| if (ret) { |
| free_sa_defrag_extent(new); |
| atomic_dec(&root->fs_info->defrag_running); |
| } else { |
| relink_file_extents(new); |
| } |
| } |
| |
| /* once for us */ |
| btrfs_put_ordered_extent(ordered_extent); |
| /* once for the tree */ |
| btrfs_put_ordered_extent(ordered_extent); |
| |
| return ret; |
| } |
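| |
| /* |
| * Rough outline of btrfs_finish_ordered_io() above (a summary of the |
| * existing flow, not extra behaviour): |
| * |
| * 1. bail out on IO errors or on extents that were fully truncated |
| * 2. NOCOW writes only need their qgroup reservation freed and the |
| * inode item updated |
| * 3. otherwise join a transaction and either mark a preallocated |
| * extent as written or insert a fresh file extent item |
| * 4. insert the pending csums and update i_size and the inode item |
| * 5. on failure (or for the truncated tail) drop the extent cache and |
| * hand the reserved extent back to the allocator |
| */ |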
| |
| static void finish_ordered_fn(struct btrfs_work *work) |
| { |
| struct btrfs_ordered_extent *ordered_extent; |
| ordered_extent = container_of(work, struct btrfs_ordered_extent, work); |
| btrfs_finish_ordered_io(ordered_extent); |
| } |
| |
| static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, |
| struct extent_state *state, int uptodate) |
| { |
| struct inode *inode = page->mapping->host; |
| struct btrfs_root *root = BTRFS_I(inode)->root; |
| struct btrfs_ordered_extent *ordered_extent = NULL; |
| struct btrfs_workqueue *wq; |
| btrfs_work_func_t func; |
| |
| trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); |
| |
| ClearPagePrivate2(page); |
| if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, |
| end - start + 1, uptodate)) |
| return 0; |
| |
| if (btrfs_is_free_space_inode(inode)) { |
| wq = root->fs_info->endio_freespace_worker; |
| func = btrfs_freespace_write_helper; |
| } else { |
| wq = root->fs_info->endio_write_workers; |
| func = btrfs_endio_write_helper; |
| } |
| |
| btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL, |
| NULL); |
| btrfs_queue_work(wq, &ordered_extent->work); |
| |
| return 0; |
| } |
| |
| static int __readpage_endio_check(struct inode *inode, |
| struct btrfs_io_bio *io_bio, |
|