Btrfs: Compression corner fixes

Make sure we keep page->mapping NULL on the pages we get from
alloc_page.  It is set only briefly so a few of the callbacks can do the
right thing, but in general these pages don't have a mapping.
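
For example, in the compressed write completion path (the compression.c
hunk below), the mapping is set only around the end_io callback and
cleared again right afterwards:

	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL, 1);
	cb->compressed_pages[0]->mapping = NULL;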

Don't try to truncate compressed inline items in btrfs_drop_extents.
The whole compressed item must be preserved.
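
Concretely, in the btrfs_drop_extents hunks below, ram_bytes is always
updated but the item itself is only truncated when the extent is neither
compressed nor encrypted:

	btrfs_set_file_extent_ram_bytes(leaf, extent, new_size);
	if (!compression && !encryption)
		btrfs_truncate_item(trans, root, path, new_size, 1);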

Don't try to create multipage inline compressed items.  If we did,
overwriting just the first page of the file would force us to read in and
recow all the pages after it in the same compressed inline item.  For now,
only create single page inline items.
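
Roughly, that limit is enforced by an extra check in the inline test in
inode.c (hunk below); anything that would reach past the first page falls
back to a regular extent:

	if (start > 0 ||
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    ...) {
		/* won't fit as a single-page inline item */
	}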

Make sure we lock pages in the correct order during delalloc.  The
search into the state tree for delalloc bytes can return a range that
starts before the page we already have locked.
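
The fix clamps the result of find_delalloc_range (extent_io.c hunk below)
so the range we go on to lock never begins before locked_page:

	if (!found || delalloc_end <= *start) {
		*start = delalloc_start;
		*end = delalloc_end;
		return found;
	}

	/* start comes from the offset of locked_page */
	if (delalloc_start < *start)
		delalloc_start = *start;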

Signed-off-by: Chris Mason <chris.mason@oracle.com>

diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 9adaa79..3549131 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -220,10 +220,12 @@
 	 */
 	inode = cb->inode;
 	tree = &BTRFS_I(inode)->io_tree;
+	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
 	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
 					 cb->start,
 					 cb->start + cb->len - 1,
 					 NULL, 1);
+	cb->compressed_pages[0]->mapping = NULL;
 
 	end_compressed_writeback(inode, cb->start, cb->len);
 	/* note, our inode could be gone now */
@@ -306,6 +308,7 @@
 		else
 			ret = 0;
 
+		page->mapping = NULL;
 		if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) <
 		    PAGE_CACHE_SIZE) {
 			bio_get(bio);
@@ -423,6 +426,7 @@
 		else
 			ret = 0;
 
+		page->mapping = NULL;
 		if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) <
 		    PAGE_CACHE_SIZE) {
 			bio_get(comp_bio);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index eb3c12e..9b37ce6 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1242,13 +1242,22 @@
 	delalloc_end = 0;
 	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
 				    max_bytes);
-	if (!found) {
+	if (!found || delalloc_end <= *start) {
 		*start = delalloc_start;
 		*end = delalloc_end;
 		return found;
 	}
 
 	/*
+	 * start comes from the offset of locked_page.  We have to lock
+	 * pages in order, so we can't process delalloc bytes before
+	 * locked_page
+	 */
+	if (delalloc_start < *start) {
+		delalloc_start = *start;
+	}
+
+	/*
 	 * make sure to limit the number of pages we try to lock down
 	 * if we're looping.
 	 */
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 238a8e2..0c8cc35 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -368,8 +368,8 @@
 	u64 search_start = start;
 	u64 leaf_start;
 	u64 ram_bytes = 0;
-	u8 compression = 0;
-	u8 encryption = 0;
+	u8 compression;
+	u8 encryption;
 	u16 other_encoding = 0;
 	u64 root_gen;
 	u64 root_owner;
@@ -415,6 +415,8 @@
 		leaf_start = 0;
 		root_gen = 0;
 		root_owner = 0;
+		compression = 0;
+		encryption = 0;
 		extent = NULL;
 		leaf = path->nodes[0];
 		slot = path->slots[0];
@@ -546,8 +548,12 @@
 						   inline_limit - key.offset);
 				inode_sub_bytes(inode, extent_end -
 						inline_limit);
-				btrfs_truncate_item(trans, root, path,
-						    new_size, 1);
+				btrfs_set_file_extent_ram_bytes(leaf, extent,
+							new_size);
+				if (!compression && !encryption) {
+					btrfs_truncate_item(trans, root, path,
+							    new_size, 1);
+				}
 			}
 		}
 		/* delete the entire extent */
@@ -567,8 +573,11 @@
 			new_size = btrfs_file_extent_calc_inline_size(
 						   extent_end - end);
 			inode_sub_bytes(inode, end - key.offset);
-			ret = btrfs_truncate_item(trans, root, path,
-						  new_size, 0);
+			btrfs_set_file_extent_ram_bytes(leaf, extent,
+							new_size);
+			if (!compression && !encryption)
+				ret = btrfs_truncate_item(trans, root, path,
+							  new_size, 0);
 			BUG_ON(ret);
 		}
 		/* create bookend, splitting the extent in two */
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 789c376..806708d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -239,6 +239,7 @@
 		data_len = compressed_size;
 
 	if (start > 0 ||
+	    actual_end >= PAGE_CACHE_SIZE ||
 	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
 	    (!compressed_size &&
 	    (actual_end & (root->sectorsize - 1)) == 0) ||
@@ -248,7 +249,7 @@
 	}
 
 	ret = btrfs_drop_extents(trans, root, inode, start,
-				 aligned_end, aligned_end, &hint_byte);
+				 aligned_end, start, &hint_byte);
 	BUG_ON(ret);
 
 	if (isize > actual_end)
@@ -423,6 +424,7 @@
 		 * free any pages it allocated and our page pointer array
 		 */
 		for (i = 0; i < nr_pages_ret; i++) {
+			WARN_ON(pages[i]->mapping);
 			page_cache_release(pages[i]);
 		}
 		kfree(pages);
@@ -572,8 +574,10 @@
 	extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
 				     start, end, locked_page, 0, 0, 0);
 free_pages_out:
-	for (i = 0; i < nr_pages_ret; i++)
+	for (i = 0; i < nr_pages_ret; i++) {
+		WARN_ON(pages[i]->mapping);
 		page_cache_release(pages[i]);
+	}
 	if (pages)
 		kfree(pages);