| // SPDX-License-Identifier: GPL-2.0 |
| /* |
| * Copyright (C) 2007,2008 Oracle. All rights reserved. |
| */ |
| |
| #include <linux/sched.h> |
| #include <linux/slab.h> |
| #include <linux/rbtree.h> |
| #include <linux/mm.h> |
| #include "ctree.h" |
| #include "disk-io.h" |
| #include "transaction.h" |
| #include "print-tree.h" |
| #include "locking.h" |
| |
| static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root |
| *root, struct btrfs_path *path, int level); |
| static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root, |
| const struct btrfs_key *ins_key, struct btrfs_path *path, |
| int data_size, int extend); |
| static int push_node_left(struct btrfs_trans_handle *trans, |
| struct btrfs_fs_info *fs_info, |
| struct extent_buffer *dst, |
| struct extent_buffer *src, int empty); |
| static int balance_node_right(struct btrfs_trans_handle *trans, |
| struct btrfs_fs_info *fs_info, |
| struct extent_buffer *dst_buf, |
| struct extent_buffer *src_buf); |
| static void del_ptr(struct btrfs_root *root, struct btrfs_path *path, |
| int level, int slot); |
| |
| struct btrfs_path *btrfs_alloc_path(void) |
| { |
| return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS); |
| } |
| |
| /* |
| * set all locked nodes in the path to blocking locks. This should |
| * be done before scheduling, because other tasks busy-wait on |
| * spinning locks. |
| */ |
| noinline void btrfs_set_path_blocking(struct btrfs_path *p) |
| { |
| int i; |
| for (i = 0; i < BTRFS_MAX_LEVEL; i++) { |
| if (!p->nodes[i] || !p->locks[i]) |
| continue; |
| btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]); |
| if (p->locks[i] == BTRFS_READ_LOCK) |
| p->locks[i] = BTRFS_READ_LOCK_BLOCKING; |
| else if (p->locks[i] == BTRFS_WRITE_LOCK) |
| p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING; |
| } |
| } |
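| |
| /* |
| * A sketch of the usual pattern (illustrative, not lifted verbatim |
| * from a caller): convert to blocking locks before any operation |
| * that may schedule, then convert back afterwards: |
| * |
| * btrfs_set_path_blocking(path); |
| * ret = operation_that_may_schedule(); |
| * btrfs_clear_path_blocking(path, NULL, 0); |
| * |
| * operation_that_may_schedule() is a placeholder name. |
| */ |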
| |
| /* |
| * reset all the locked nodes in the path to spinning locks. |
| * |
| * held is used to keep lockdep happy: when lockdep is enabled, we |
| * set held to a blocking lock before we go around and retake all |
| * the spinlocks in the path. You can safely pass NULL for held. |
| */ |
| noinline void btrfs_clear_path_blocking(struct btrfs_path *p, |
| struct extent_buffer *held, int held_rw) |
| { |
| int i; |
| |
| if (held) { |
| btrfs_set_lock_blocking_rw(held, held_rw); |
| if (held_rw == BTRFS_WRITE_LOCK) |
| held_rw = BTRFS_WRITE_LOCK_BLOCKING; |
| else if (held_rw == BTRFS_READ_LOCK) |
| held_rw = BTRFS_READ_LOCK_BLOCKING; |
| } |
| btrfs_set_path_blocking(p); |
| |
| for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) { |
| if (p->nodes[i] && p->locks[i]) { |
| btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]); |
| if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING) |
| p->locks[i] = BTRFS_WRITE_LOCK; |
| else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING) |
| p->locks[i] = BTRFS_READ_LOCK; |
| } |
| } |
| |
| if (held) |
| btrfs_clear_lock_blocking_rw(held, held_rw); |
| } |
| |
| /* this also releases the path */ |
| void btrfs_free_path(struct btrfs_path *p) |
| { |
| if (!p) |
| return; |
| btrfs_release_path(p); |
| kmem_cache_free(btrfs_path_cachep, p); |
| } |
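| |
| /* |
| * The usual path lifetime, sketched for illustration (the search call |
| * and key are placeholders, not code from this file): |
| * |
| * struct btrfs_path *path = btrfs_alloc_path(); |
| * if (!path) |
| * return -ENOMEM; |
| * ret = btrfs_search_slot(trans, root, &key, path, 0, 0); |
| * ... use path->nodes[0] / path->slots[0] ... |
| * btrfs_free_path(path); |
| */ |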
| |
| /* |
| * path release drops references on the extent buffers in the path |
| * and it drops any locks held by this path |
| * |
| * It is safe to call this on paths that hold no locks or extent buffers. |
| */ |
| noinline void btrfs_release_path(struct btrfs_path *p) |
| { |
| int i; |
| |
| for (i = 0; i < BTRFS_MAX_LEVEL; i++) { |
| p->slots[i] = 0; |
| if (!p->nodes[i]) |
| continue; |
| if (p->locks[i]) { |
| btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]); |
| p->locks[i] = 0; |
| } |
| free_extent_buffer(p->nodes[i]); |
| p->nodes[i] = NULL; |
| } |
| } |
| |
| /* |
| * safely gets a reference on the root node of a tree. A lock |
| * is not taken, so a concurrent writer may put a different node |
| * at the root of the tree. See btrfs_lock_root_node for the |
| * looping required. |
| * |
| * The extent buffer returned by this has a reference taken, so |
| * it won't disappear. It may stop being the root of the tree |
| * at any time because there are no locks held. |
| */ |
| struct extent_buffer *btrfs_root_node(struct btrfs_root *root) |
| { |
| struct extent_buffer *eb; |
| |
| while (1) { |
| rcu_read_lock(); |
| eb = rcu_dereference(root->node); |
| |
| /* |
| * RCU really hurts here; we could free up the root node because |
| * it was COWed, but we may not get the new root node yet, so do |
| * the inc_not_zero dance. If that doesn't work, then |
| * synchronize_rcu and try again. |
| */ |
| if (atomic_inc_not_zero(&eb->refs)) { |
| rcu_read_unlock(); |
| break; |
| } |
| rcu_read_unlock(); |
| synchronize_rcu(); |
| } |
| return eb; |
| } |
| |
| /* loop around taking references on and locking the root node of the |
| * tree until you end up with a lock on the root. A locked buffer |
| * is returned, with a reference held. |
| */ |
| struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root) |
| { |
| struct extent_buffer *eb; |
| |
| while (1) { |
| eb = btrfs_root_node(root); |
| btrfs_tree_lock(eb); |
| if (eb == root->node) |
| break; |
| btrfs_tree_unlock(eb); |
| free_extent_buffer(eb); |
| } |
| return eb; |
| } |
| |
| /* loop around taking references on and read locking the root node of |
| * the tree until you end up with a read lock on the root. A |
| * read-locked buffer is returned, with a reference held. |
| */ |
| struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root) |
| { |
| struct extent_buffer *eb; |
| |
| while (1) { |
| eb = btrfs_root_node(root); |
| btrfs_tree_read_lock(eb); |
| if (eb == root->node) |
| break; |
| btrfs_tree_read_unlock(eb); |
| free_extent_buffer(eb); |
| } |
| return eb; |
| } |
| |
| /* Cow-only roots (everything that is not a reference-counted COW |
| * subvolume) just get put onto a simple dirty list. transaction.c |
| * walks this list to make sure they get properly updated on disk. |
| */ |
| static void add_root_to_dirty_list(struct btrfs_root *root) |
| { |
| struct btrfs_fs_info *fs_info = root->fs_info; |
| |
| if (test_bit(BTRFS_ROOT_DIRTY, &root->state) || |
| !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state)) |
| return; |
| |
| spin_lock(&fs_info->trans_lock); |
| if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) { |
| /* Want the extent tree to be the last on the list */ |
| if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID) |
| list_move_tail(&root->dirty_list, |
| &fs_info->dirty_cowonly_roots); |
| else |
| list_move(&root->dirty_list, |
| &fs_info->dirty_cowonly_roots); |
| } |
| spin_unlock(&fs_info->trans_lock); |
| } |
| |
| /* |
| * used by snapshot creation to make a copy of a root for a tree with |
| * a given objectid. The buffer with the new root node is returned in |
| * cow_ret, and this function returns zero on success or a negative |
| * error code. |
| */ |
| int btrfs_copy_root(struct btrfs_trans_handle *trans, |
| struct btrfs_root *root, |
| struct extent_buffer *buf, |
| struct extent_buffer **cow_ret, u64 new_root_objectid) |
| { |
| struct btrfs_fs_info *fs_info = root->fs_info; |
| struct extent_buffer *cow; |
| int ret = 0; |
| int level; |
| struct btrfs_disk_key disk_key; |
| |
| WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) && |
| trans->transid != fs_info->running_transaction->transid); |
| WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) && |
| trans->transid != root->last_trans); |
| |
| level = btrfs_header_level(buf); |
| if (level == 0) |
| btrfs_item_key(buf, &disk_key, 0); |
| else |
| btrfs_node_key(buf, &disk_key, 0); |
| |
| cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid, |
| &disk_key, level, buf->start, 0); |
| if (IS_ERR(cow)) |
| return PTR_ERR(cow); |
| |
| copy_extent_buffer_full(cow, buf); |
| btrfs_set_header_bytenr(cow, cow->start); |
| btrfs_set_header_generation(cow, trans->transid); |
| btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV); |
| btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN | |
| BTRFS_HEADER_FLAG_RELOC); |
| if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID) |
| btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC); |
| else |
| btrfs_set_header_owner(cow, new_root_objectid); |
| |
| write_extent_buffer_fsid(cow, fs_info->fsid); |
| |
| WARN_ON(btrfs_header_generation(buf) > trans->transid); |
| if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID) |
| ret = btrfs_inc_ref(trans, root, cow, 1); |
| else |
| ret = btrfs_inc_ref(trans, root, cow, 0); |
| |
| if (ret) |
| return ret; |
| |
| btrfs_mark_buffer_dirty(cow); |
| *cow_ret = cow; |
| return 0; |
| } |
| |
| enum mod_log_op { |
| MOD_LOG_KEY_REPLACE, |
| MOD_LOG_KEY_ADD, |
| MOD_LOG_KEY_REMOVE, |
| MOD_LOG_KEY_REMOVE_WHILE_FREEING, |
| MOD_LOG_KEY_REMOVE_WHILE_MOVING, |
| MOD_LOG_MOVE_KEYS, |
| MOD_LOG_ROOT_REPLACE, |
| }; |
| |
| struct tree_mod_root { |
| u64 logical; |
| u8 level; |
| }; |
| |
| struct tree_mod_elem { |
| struct rb_node node; |
| u64 logical; |
| u64 seq; |
| enum mod_log_op op; |
| |
| /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */ |
| int slot; |
| |
| /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */ |
| u64 generation; |
| |
| /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */ |
| struct btrfs_disk_key key; |
| u64 blockptr; |
| |
| /* this is used for op == MOD_LOG_MOVE_KEYS */ |
| struct { |
| int dst_slot; |
| int nr_items; |
| } move; |
| |
| /* this is used for op == MOD_LOG_ROOT_REPLACE */ |
| struct tree_mod_root old_root; |
| }; |
| |
| /* |
| * Pull a new tree mod seq number for our operation. |
| */ |
| static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info) |
| { |
| return atomic64_inc_return(&fs_info->tree_mod_seq); |
| } |
| |
| /* |
| * This adds a new blocker to the tree mod log's blocker list if the @elem |
| * passed does not already have a sequence number set. So when a caller expects |
| * to record tree modifications, it should ensure that elem->seq is zero |
| * before calling btrfs_get_tree_mod_seq. |
| * Returns a fresh, unused tree mod log sequence number, even if no new |
| * blocker was added. |
| */ |
| u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info, |
| struct seq_list *elem) |
| { |
| write_lock(&fs_info->tree_mod_log_lock); |
| if (!elem->seq) { |
| elem->seq = btrfs_inc_tree_mod_seq(fs_info); |
| list_add_tail(&elem->list, &fs_info->tree_mod_seq_list); |
| } |
| write_unlock(&fs_info->tree_mod_log_lock); |
| |
| return elem->seq; |
| } |
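| |
| /* |
| * A sketch of how callers pair this with btrfs_put_tree_mod_seq() |
| * (details of the walk elided): |
| * |
| * struct seq_list elem = SEQ_LIST_INIT(elem); |
| * |
| * btrfs_get_tree_mod_seq(fs_info, &elem); |
| * ... walk backrefs against a frozen view of the trees ... |
| * btrfs_put_tree_mod_seq(fs_info, &elem); |
| * |
| * While elem is on the list, log entries with sequence numbers >= |
| * elem.seq are preserved so old tree states can be reconstructed. |
| */ |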
| |
| void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info, |
| struct seq_list *elem) |
| { |
| struct rb_root *tm_root; |
| struct rb_node *node; |
| struct rb_node *next; |
| struct seq_list *cur_elem; |
| struct tree_mod_elem *tm; |
| u64 min_seq = (u64)-1; |
| u64 seq_putting = elem->seq; |
| |
| if (!seq_putting) |
| return; |
| |
| write_lock(&fs_info->tree_mod_log_lock); |
| list_del(&elem->list); |
| elem->seq = 0; |
| |
| list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) { |
| if (cur_elem->seq < min_seq) { |
| if (seq_putting > cur_elem->seq) { |
| /* |
| * blocker with lower sequence number exists, we |
| * cannot remove anything from the log |
| */ |
| write_unlock(&fs_info->tree_mod_log_lock); |
| return; |
| } |
| min_seq = cur_elem->seq; |
| } |
| } |
| |
| /* |
| * anything that's lower than the lowest existing (read: blocked) |
| * sequence number can be removed from the tree. |
| */ |
| tm_root = &fs_info->tree_mod_log; |
| for (node = rb_first(tm_root); node; node = next) { |
| next = rb_next(node); |
| tm = rb_entry(node, struct tree_mod_elem, node); |
| if (tm->seq >= min_seq) |
| continue; |
| rb_erase(node, tm_root); |
| kfree(tm); |
| } |
| write_unlock(&fs_info->tree_mod_log_lock); |
| } |
| |
| /* |
| * key order of the log: |
| * node/leaf start address -> sequence |
| * |
| * The 'start address' is the logical address of the *new* root node |
| * for root replace operations, or the logical address of the affected |
| * block for all other operations. |
| * |
| * Note: must be called with write lock for fs_info::tree_mod_log_lock. |
| */ |
| static noinline int |
| __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm) |
| { |
| struct rb_root *tm_root; |
| struct rb_node **new; |
| struct rb_node *parent = NULL; |
| struct tree_mod_elem *cur; |
| |
| tm->seq = btrfs_inc_tree_mod_seq(fs_info); |
| |
| tm_root = &fs_info->tree_mod_log; |
| new = &tm_root->rb_node; |
| while (*new) { |
| cur = rb_entry(*new, struct tree_mod_elem, node); |
| parent = *new; |
| if (cur->logical < tm->logical) |
| new = &((*new)->rb_left); |
| else if (cur->logical > tm->logical) |
| new = &((*new)->rb_right); |
| else if (cur->seq < tm->seq) |
| new = &((*new)->rb_left); |
| else if (cur->seq > tm->seq) |
| new = &((*new)->rb_right); |
| else |
| return -EEXIST; |
| } |
| |
| rb_link_node(&tm->node, parent, new); |
| rb_insert_color(&tm->node, tm_root); |
| return 0; |
| } |
| |
| /* |
| * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it |
| * returns zero with the tree_mod_log_lock acquired. The caller must hold |
| * this until all tree mod log insertions are recorded in the rb tree and then |
| * write unlock fs_info::tree_mod_log_lock. |
| */ |
| static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info, |
| struct extent_buffer *eb) |
| { |
| smp_mb(); |
| if (list_empty(&fs_info->tree_mod_seq_list)) |
| return 1; |
| if (eb && btrfs_header_level(eb) == 0) |
| return 1; |
| |
| write_lock(&fs_info->tree_mod_log_lock); |
| if (list_empty(&fs_info->tree_mod_seq_list)) { |
| write_unlock(&fs_info->tree_mod_log_lock); |
| return 1; |
| } |
| |
| return 0; |
| } |
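| |
| /* |
| * The calling convention around tree_mod_dont_log(), sketched: |
| * |
| * if (tree_mod_dont_log(fs_info, eb)) |
| * return 0; (nothing to log, lock not taken) |
| * ... one or more __tree_mod_log_insert() calls ... |
| * write_unlock(&fs_info->tree_mod_log_lock); |
| */ |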
| |
| /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */ |
| static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info, |
| struct extent_buffer *eb) |
| { |
| smp_mb(); |
| if (list_empty(&fs_info->tree_mod_seq_list)) |
| return 0; |
| if (eb && btrfs_header_level(eb) == 0) |
| return 0; |
| |
| return 1; |
| } |
| |
| static struct tree_mod_elem * |
| alloc_tree_mod_elem(struct extent_buffer *eb, int slot, |
| enum mod_log_op op, gfp_t flags) |
| { |
| struct tree_mod_elem *tm; |
| |
| tm = kzalloc(sizeof(*tm), flags); |
| if (!tm) |
| return NULL; |
| |
| tm->logical = eb->start; |
| if (op != MOD_LOG_KEY_ADD) { |
| btrfs_node_key(eb, &tm->key, slot); |
| tm->blockptr = btrfs_node_blockptr(eb, slot); |
| } |
| tm->op = op; |
| tm->slot = slot; |
| tm->generation = btrfs_node_ptr_generation(eb, slot); |
| RB_CLEAR_NODE(&tm->node); |
| |
| return tm; |
| } |
| |
| static noinline int tree_mod_log_insert_key(struct extent_buffer *eb, int slot, |
| enum mod_log_op op, gfp_t flags) |
| { |
| struct tree_mod_elem *tm; |
| int ret; |
| |
| if (!tree_mod_need_log(eb->fs_info, eb)) |
| return 0; |
| |
| tm = alloc_tree_mod_elem(eb, slot, op, flags); |
| if (!tm) |
| return -ENOMEM; |
| |
| if (tree_mod_dont_log(eb->fs_info, eb)) { |
| kfree(tm); |
| return 0; |
| } |
| |
| ret = __tree_mod_log_insert(eb->fs_info, tm); |
| write_unlock(&eb->fs_info->tree_mod_log_lock); |
| if (ret) |
| kfree(tm); |
| |
| return ret; |
| } |
| |
| static noinline int tree_mod_log_insert_move(struct extent_buffer *eb, |
| int dst_slot, int src_slot, int nr_items) |
| { |
| struct tree_mod_elem *tm = NULL; |
| struct tree_mod_elem **tm_list = NULL; |
| int ret = 0; |
| int i; |
| int locked = 0; |
| |
| if (!tree_mod_need_log(eb->fs_info, eb)) |
| return 0; |
| |
| tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS); |
| if (!tm_list) |
| return -ENOMEM; |
| |
| tm = kzalloc(sizeof(*tm), GFP_NOFS); |
| if (!tm) { |
| ret = -ENOMEM; |
| goto free_tms; |
| } |
| |
| tm->logical = eb->start; |
| tm->slot = src_slot; |
| tm->move.dst_slot = dst_slot; |
| tm->move.nr_items = nr_items; |
| tm->op = MOD_LOG_MOVE_KEYS; |
| |
| for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) { |
| tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot, |
| MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS); |
| if (!tm_list[i]) { |
| ret = -ENOMEM; |
| goto free_tms; |
| } |
| } |
| |
| if (tree_mod_dont_log(eb->fs_info, eb)) |
| goto free_tms; |
| locked = 1; |
| |
| /* |
| * When we overwrite something during the move, we log these removals. |
| * This can only happen when we move towards the beginning of the |
| * buffer, i.e. dst_slot < src_slot. |
| */ |
| for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) { |
| ret = __tree_mod_log_insert(eb->fs_info, tm_list[i]); |
| if (ret) |
| goto free_tms; |
| } |
| |
| ret = __tree_mod_log_insert(eb->fs_info, tm); |
| if (ret) |
| goto free_tms; |
| write_unlock(&eb->fs_info->tree_mod_log_lock); |
| kfree(tm_list); |
| |
| return 0; |
| free_tms: |
| for (i = 0; i < nr_items; i++) { |
| if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node)) |
| rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log); |
| kfree(tm_list[i]); |
| } |
| if (locked) |
| write_unlock(&eb->fs_info->tree_mod_log_lock); |
| kfree(tm_list); |
| kfree(tm); |
| |
| return ret; |
| } |
| |
| static inline int |
| __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, |
| struct tree_mod_elem **tm_list, |
| int nritems) |
| { |
| int i, j; |
| int ret; |
| |
| for (i = nritems - 1; i >= 0; i--) { |
| ret = __tree_mod_log_insert(fs_info, tm_list[i]); |
| if (ret) { |
| for (j = nritems - 1; j > i; j--) |
| rb_erase(&tm_list[j]->node, |
| &fs_info->tree_mod_log); |
| return ret; |
| } |
| } |
| |
| return 0; |
| } |
| |
| static noinline int tree_mod_log_insert_root(struct extent_buffer *old_root, |
| struct extent_buffer *new_root, int log_removal) |
| { |
| struct btrfs_fs_info *fs_info = old_root->fs_info; |
| struct tree_mod_elem *tm = NULL; |
| struct tree_mod_elem **tm_list = NULL; |
| int nritems = 0; |
| int ret = 0; |
| int i; |
| |
| if (!tree_mod_need_log(fs_info, NULL)) |
| return 0; |
| |
| if (log_removal && btrfs_header_level(old_root) > 0) { |
| nritems = btrfs_header_nritems(old_root); |
| tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), |
| GFP_NOFS); |
| if (!tm_list) { |
| ret = -ENOMEM; |
| goto free_tms; |
| } |
| for (i = 0; i < nritems; i++) { |
| tm_list[i] = alloc_tree_mod_elem(old_root, i, |
| MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS); |
| if (!tm_list[i]) { |
| ret = -ENOMEM; |
| goto free_tms; |
| } |
| } |
| } |
| |
| tm = kzalloc(sizeof(*tm), GFP_NOFS); |
| if (!tm) { |
| ret = -ENOMEM; |
| goto free_tms; |
| } |
| |
| tm->logical = new_root->start; |
| tm->old_root.logical = old_root->start; |
| tm->old_root.level = btrfs_header_level(old_root); |
| tm->generation = btrfs_header_generation(old_root); |
| tm->op = MOD_LOG_ROOT_REPLACE; |
| |
| if (tree_mod_dont_log(fs_info, NULL)) |
| goto free_tms; |
| |
| if (tm_list) |
| ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems); |
| if (!ret) |
| ret = __tree_mod_log_insert(fs_info, tm); |
| |
| write_unlock(&fs_info->tree_mod_log_lock); |
| if (ret) |
| goto free_tms; |
| kfree(tm_list); |
| |
| return ret; |
| |
| free_tms: |
| if (tm_list) { |
| for (i = 0; i < nritems; i++) |
| kfree(tm_list[i]); |
| kfree(tm_list); |
| } |
| kfree(tm); |
| |
| return ret; |
| } |
| |
| static struct tree_mod_elem * |
| __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq, |
| int smallest) |
| { |
| struct rb_root *tm_root; |
| struct rb_node *node; |
| struct tree_mod_elem *cur = NULL; |
| struct tree_mod_elem *found = NULL; |
| |
| read_lock(&fs_info->tree_mod_log_lock); |
| tm_root = &fs_info->tree_mod_log; |
| node = tm_root->rb_node; |
| while (node) { |
| cur = rb_entry(node, struct tree_mod_elem, node); |
| if (cur->logical < start) { |
| node = node->rb_left; |
| } else if (cur->logical > start) { |
| node = node->rb_right; |
| } else if (cur->seq < min_seq) { |
| node = node->rb_left; |
| } else if (!smallest) { |
| /* we want the node with the highest seq */ |
| if (found) |
| BUG_ON(found->seq > cur->seq); |
| found = cur; |
| node = node->rb_left; |
| } else if (cur->seq > min_seq) { |
| /* we want the node with the smallest seq */ |
| if (found) |
| BUG_ON(found->seq < cur->seq); |
| found = cur; |
| node = node->rb_right; |
| } else { |
| found = cur; |
| break; |
| } |
| } |
| read_unlock(&fs_info->tree_mod_log_lock); |
| |
| return found; |
| } |
| |
| /* |
| * this returns the element from the log with the smallest time sequence |
| * value that's in the log (the oldest log item). any element with a time |
| * sequence lower than min_seq will be ignored. |
| */ |
| static struct tree_mod_elem * |
| tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start, |
| u64 min_seq) |
| { |
| return __tree_mod_log_search(fs_info, start, min_seq, 1); |
| } |
| |
| /* |
| * this returns the element from the log with the largest time sequence |
| * value that's in the log (the most recent log item). any element with |
| * a time sequence lower than min_seq will be ignored. |
| */ |
| static struct tree_mod_elem * |
| tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq) |
| { |
| return __tree_mod_log_search(fs_info, start, min_seq, 0); |
| } |
| |
| static noinline int |
| tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst, |
| struct extent_buffer *src, unsigned long dst_offset, |
| unsigned long src_offset, int nr_items) |
| { |
| int ret = 0; |
| struct tree_mod_elem **tm_list = NULL; |
| struct tree_mod_elem **tm_list_add, **tm_list_rem; |
| int i; |
| int locked = 0; |
| |
| if (!tree_mod_need_log(fs_info, NULL)) |
| return 0; |
| |
| if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0) |
| return 0; |
| |
| tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *), |
| GFP_NOFS); |
| if (!tm_list) |
| return -ENOMEM; |
| |
| tm_list_add = tm_list; |
| tm_list_rem = tm_list + nr_items; |
| for (i = 0; i < nr_items; i++) { |
| tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset, |
| MOD_LOG_KEY_REMOVE, GFP_NOFS); |
| if (!tm_list_rem[i]) { |
| ret = -ENOMEM; |
| goto free_tms; |
| } |
| |
| tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset, |
| MOD_LOG_KEY_ADD, GFP_NOFS); |
| if (!tm_list_add[i]) { |
| ret = -ENOMEM; |
| goto free_tms; |
| } |
| } |
| |
| if (tree_mod_dont_log(fs_info, NULL)) |
| goto free_tms; |
| locked = 1; |
| |
| for (i = 0; i < nr_items; i++) { |
| ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]); |
| if (ret) |
| goto free_tms; |
| ret = __tree_mod_log_insert(fs_info, tm_list_add[i]); |
| if (ret) |
| goto free_tms; |
| } |
| |
| write_unlock(&fs_info->tree_mod_log_lock); |
| kfree(tm_list); |
| |
| return 0; |
| |
| free_tms: |
| for (i = 0; i < nr_items * 2; i++) { |
| if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node)) |
| rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log); |
| kfree(tm_list[i]); |
| } |
| if (locked) |
| write_unlock(&fs_info->tree_mod_log_lock); |
| kfree(tm_list); |
| |
| return ret; |
| } |
| |
| static noinline int tree_mod_log_free_eb(struct extent_buffer *eb) |
| { |
| struct tree_mod_elem **tm_list = NULL; |
| int nritems = 0; |
| int i; |
| int ret = 0; |
| |
| if (btrfs_header_level(eb) == 0) |
| return 0; |
| |
| if (!tree_mod_need_log(eb->fs_info, NULL)) |
| return 0; |
| |
| nritems = btrfs_header_nritems(eb); |
| tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS); |
| if (!tm_list) |
| return -ENOMEM; |
| |
| for (i = 0; i < nritems; i++) { |
| tm_list[i] = alloc_tree_mod_elem(eb, i, |
| MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS); |
| if (!tm_list[i]) { |
| ret = -ENOMEM; |
| goto free_tms; |
| } |
| } |
| |
| if (tree_mod_dont_log(eb->fs_info, eb)) |
| goto free_tms; |
| |
| ret = __tree_mod_log_free_eb(eb->fs_info, tm_list, nritems); |
| write_unlock(&eb->fs_info->tree_mod_log_lock); |
| if (ret) |
| goto free_tms; |
| kfree(tm_list); |
| |
| return 0; |
| |
| free_tms: |
| for (i = 0; i < nritems; i++) |
| kfree(tm_list[i]); |
| kfree(tm_list); |
| |
| return ret; |
| } |
| |
| /* |
| * check if the tree block can be shared by multiple trees |
| */ |
| int btrfs_block_can_be_shared(struct btrfs_root *root, |
| struct extent_buffer *buf) |
| { |
| /* |
| * Tree blocks not in reference counted trees and tree roots |
| * are never shared. If a block was allocated after the last |
| * snapshot and the block was not allocated by tree relocation, |
| * we know the block is not shared. |
| */ |
| if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) && |
| buf != root->node && buf != root->commit_root && |
| (btrfs_header_generation(buf) <= |
| btrfs_root_last_snapshot(&root->root_item) || |
| btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))) |
| return 1; |
| |
| return 0; |
| } |
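| |
| /* |
| * A worked example of the generation check above: if the last snapshot |
| * of the root was taken in generation 100 and buf was written in |
| * generation 90, buf predates the snapshot and may be shared with it. |
| * A block written in generation 101 (and not flagged RELOC) cannot be |
| * shared, since no snapshot has been taken after it was allocated. |
| */ |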
| |
| static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, |
| struct btrfs_root *root, |
| struct extent_buffer *buf, |
| struct extent_buffer *cow, |
| int *last_ref) |
| { |
| struct btrfs_fs_info *fs_info = root->fs_info; |
| u64 refs; |
| u64 owner; |
| u64 flags; |
| u64 new_flags = 0; |
| int ret; |
| |
| /* |
| * Backrefs update rules: |
| * |
| * Always use full backrefs for extent pointers in tree blocks |
| * allocated by tree relocation. |
| * |
| * If a shared tree block is no longer referenced by its owner |
| * tree (btrfs_header_owner(buf) == root->root_key.objectid), |
| * use full backrefs for its extent pointers. |
| * |
| * If a tree block is being relocated |
| * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID), |
| * use full backrefs for its extent pointers. The reason is that |
| * some operations (such as dropping a tree) are only allowed on |
| * blocks that use full backrefs. |
| */ |
| |
| if (btrfs_block_can_be_shared(root, buf)) { |
| ret = btrfs_lookup_extent_info(trans, fs_info, buf->start, |
| btrfs_header_level(buf), 1, |
| &refs, &flags); |
| if (ret) |
| return ret; |
| if (refs == 0) { |
| ret = -EROFS; |
| btrfs_handle_fs_error(fs_info, ret, NULL); |
| return ret; |
| } |
| } else { |
| refs = 1; |
| if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID || |
| btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV) |
| flags = BTRFS_BLOCK_FLAG_FULL_BACKREF; |
| else |
| flags = 0; |
| } |
| |
| owner = btrfs_header_owner(buf); |
| BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID && |
| !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)); |
| |
| if (refs > 1) { |
| if ((owner == root->root_key.objectid || |
| root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && |
| !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) { |
| ret = btrfs_inc_ref(trans, root, buf, 1); |
| if (ret) |
| return ret; |
| |
| if (root->root_key.objectid == |
| BTRFS_TREE_RELOC_OBJECTID) { |
| ret = btrfs_dec_ref(trans, root, buf, 0); |
| if (ret) |
| return ret; |
| ret = btrfs_inc_ref(trans, root, cow, 1); |
| if (ret) |
| return ret; |
| } |
| new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; |
| } else { |
| |
| if (root->root_key.objectid == |
| BTRFS_TREE_RELOC_OBJECTID) |
| ret = btrfs_inc_ref(trans, root, cow, 1); |
| else |
| ret = btrfs_inc_ref(trans, root, cow, 0); |
| if (ret) |
| return ret; |
| } |
| if (new_flags != 0) { |
| int level = btrfs_header_level(buf); |
| |
| ret = btrfs_set_disk_extent_flags(trans, fs_info, |
| buf->start, |
| buf->len, |
| new_flags, level, 0); |
| if (ret) |
| return ret; |
| } |
| } else { |
| if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) { |
| if (root->root_key.objectid == |
| BTRFS_TREE_RELOC_OBJECTID) |
| ret = btrfs_inc_ref(trans, root, cow, 1); |
| else |
| ret = btrfs_inc_ref(trans, root, cow, 0); |
| if (ret) |
| return ret; |
| ret = btrfs_dec_ref(trans, root, buf, 1); |
| if (ret) |
| return ret; |
| } |
| clean_tree_block(fs_info, buf); |
| *last_ref = 1; |
| } |
| return 0; |
| } |
| |
| static struct extent_buffer *alloc_tree_block_no_bg_flush( |
| struct btrfs_trans_handle *trans, |
| struct btrfs_root *root, |
| u64 parent_start, |
| const struct btrfs_disk_key *disk_key, |
| int level, |
| u64 hint, |
| u64 empty_size) |
| { |
| struct btrfs_fs_info *fs_info = root->fs_info; |
| struct extent_buffer *ret; |
| |
| /* |
| * If we are COWing a node/leaf from the extent, chunk, device or free |
| * space trees, make sure that we do not finish block group creation of |
| * pending block groups. We do this to avoid a deadlock. |
| * COWing can result in allocation of a new chunk, and flushing pending |
| * block groups (btrfs_create_pending_block_groups()) can be triggered |
| * when finishing allocation of a new chunk. Creation of a pending block |
| * group modifies the extent, chunk, device and free space trees, |
| * therefore we could deadlock with ourselves since we are holding a |
| * lock on an extent buffer that btrfs_create_pending_block_groups() may |
| * try to COW later. |
| * For similar reasons, we also need to delay flushing pending block |
| * groups when splitting a leaf or node from one of those trees, since |
| * we are holding a write lock on it and its parent, and when inserting |
| * a new root node for one of those trees. |
| */ |
| if (root == fs_info->extent_root || |
| root == fs_info->chunk_root || |
| root == fs_info->dev_root || |
| root == fs_info->free_space_root) |
| trans->can_flush_pending_bgs = false; |
| |
| ret = btrfs_alloc_tree_block(trans, root, parent_start, |
| root->root_key.objectid, disk_key, level, |
| hint, empty_size); |
| trans->can_flush_pending_bgs = true; |
| |
| return ret; |
| } |
| |
| /* |
| * does the dirty work in cow of a single block. The parent block (if |
| * supplied) is updated to point to the new cow copy. The new buffer is marked |
| * dirty and returned locked. If you modify the block it needs to be marked |
| * dirty again. |
| * |
| * search_start -- an allocation hint for the new block |
| * |
| * empty_size -- a hint that you plan on doing more cow. This is the size in |
| * bytes the allocator should try to find free next to the block it returns. |
| * This is just a hint and may be ignored by the allocator. |
| */ |
| static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, |
| struct btrfs_root *root, |
| struct extent_buffer *buf, |
| struct extent_buffer *parent, int parent_slot, |
| struct extent_buffer **cow_ret, |
| u64 search_start, u64 empty_size) |
| { |
| struct btrfs_fs_info *fs_info = root->fs_info; |
| struct btrfs_disk_key disk_key; |
| struct extent_buffer *cow; |
| int level, ret; |
| int last_ref = 0; |
| int unlock_orig = 0; |
| u64 parent_start = 0; |
| |
| if (*cow_ret == buf) |
| unlock_orig = 1; |
| |
| btrfs_assert_tree_locked(buf); |
| |
| WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) && |
| trans->transid != fs_info->running_transaction->transid); |
| WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) && |
| trans->transid != root->last_trans); |
| |
| level = btrfs_header_level(buf); |
| |
| if (level == 0) |
| btrfs_item_key(buf, &disk_key, 0); |
| else |
| btrfs_node_key(buf, &disk_key, 0); |
| |
| if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent) |
| parent_start = parent->start; |
| |
| cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key, |
| level, search_start, empty_size); |
| if (IS_ERR(cow)) |
| return PTR_ERR(cow); |
| |
| /* cow is set to blocking by btrfs_init_new_buffer */ |
| |
| copy_extent_buffer_full(cow, buf); |
| btrfs_set_header_bytenr(cow, cow->start); |
| btrfs_set_header_generation(cow, trans->transid); |
| btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV); |
| btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN | |
| BTRFS_HEADER_FLAG_RELOC); |
| if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) |
| btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC); |
| else |
| btrfs_set_header_owner(cow, root->root_key.objectid); |
| |
| write_extent_buffer_fsid(cow, fs_info->fsid); |
| |
| ret = update_ref_for_cow(trans, root, buf, cow, &last_ref); |
| if (ret) { |
| btrfs_abort_transaction(trans, ret); |
| return ret; |
| } |
| |
| if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) { |
| ret = btrfs_reloc_cow_block(trans, root, buf, cow); |
| if (ret) { |
| btrfs_abort_transaction(trans, ret); |
| return ret; |
| } |
| } |
| |
| if (buf == root->node) { |
| WARN_ON(parent && parent != buf); |
| if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID || |
| btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV) |
| parent_start = buf->start; |
| |
| extent_buffer_get(cow); |
| ret = tree_mod_log_insert_root(root->node, cow, 1); |
| BUG_ON(ret < 0); |
| rcu_assign_pointer(root->node, cow); |
| |
| btrfs_free_tree_block(trans, root, buf, parent_start, |
| last_ref); |
| free_extent_buffer(buf); |
| add_root_to_dirty_list(root); |
| } else { |
| WARN_ON(trans->transid != btrfs_header_generation(parent)); |
| tree_mod_log_insert_key(parent, parent_slot, |
| MOD_LOG_KEY_REPLACE, GFP_NOFS); |
| btrfs_set_node_blockptr(parent, parent_slot, |
| cow->start); |
| btrfs_set_node_ptr_generation(parent, parent_slot, |
| trans->transid); |
| btrfs_mark_buffer_dirty(parent); |
| if (last_ref) { |
| ret = tree_mod_log_free_eb(buf); |
| if (ret) { |
| btrfs_abort_transaction(trans, ret); |
| return ret; |
| } |
| } |
| btrfs_free_tree_block(trans, root, buf, parent_start, |
| last_ref); |
| } |
| if (unlock_orig) |
| btrfs_tree_unlock(buf); |
| free_extent_buffer_stale(buf); |
| btrfs_mark_buffer_dirty(cow); |
| *cow_ret = cow; |
| return 0; |
| } |
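| |
| /* |
| * A summary of the COW flow above: allocate a new block, copy the |
| * contents, fix up the header (bytenr, generation, owner), update the |
| * backrefs via update_ref_for_cow(), then splice the new block into |
| * place, either as the new tree root or through the parent's node |
| * pointer, and finally free the old block. |
| */ |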
| |
| /* |
| * returns the logical address of the oldest predecessor of the given root. |
| * entries older than time_seq are ignored. |
| */ |
| static struct tree_mod_elem *__tree_mod_log_oldest_root( |
| struct extent_buffer *eb_root, u64 time_seq) |
| { |
| struct tree_mod_elem *tm; |
| struct tree_mod_elem *found = NULL; |
| u64 root_logical = eb_root->start; |
| int looped = 0; |
| |
| if (!time_seq) |
| return NULL; |
| |
| /* |
| * the very last operation that's logged for a root is the |
| * replacement operation (if it is replaced at all). that entry is |
| * keyed by the logical address of the *new* root, making it the |
| * very first operation that's found when searching for the new root. |
| */ |
| while (1) { |
| tm = tree_mod_log_search_oldest(eb_root->fs_info, root_logical, |
| time_seq); |
| if (!looped && !tm) |
| return NULL; |
| /* |
| * if there are no tree operations for the oldest root, we simply |
| * return it. this should only happen if that (old) root is at |
| * level 0. |
| */ |
| if (!tm) |
| break; |
| |
| /* |
| * if there's an operation that's not a root replacement, we |
| * found the oldest version of our root. normally, we'll find a |
| * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here. |
| */ |
| if (tm->op != MOD_LOG_ROOT_REPLACE) |
| break; |
| |
| found = tm; |
| root_logical = tm->old_root.logical; |
| looped = 1; |
| } |
| |
| /* if there's no old root to return, return what we found instead */ |
| if (!found) |
| found = tm; |
| |
| return found; |
| } |
| |
| /* |
| * tm is a pointer to the first operation to rewind within eb. then, all |
| * previous operations will be rewound (until we reach something older than |
| * time_seq). |
| */ |
| static void |
| __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, |
| u64 time_seq, struct tree_mod_elem *first_tm) |
| { |
| u32 n; |
| struct rb_node *next; |
| struct tree_mod_elem *tm = first_tm; |
| unsigned long o_dst; |
| unsigned long o_src; |
| unsigned long p_size = sizeof(struct btrfs_key_ptr); |
| |
| n = btrfs_header_nritems(eb); |
| read_lock(&fs_info->tree_mod_log_lock); |
| while (tm && tm->seq >= time_seq) { |
| /* |
| * each log entry records the operation that was used for the |
| * modification. as we're going backwards, we apply the |
| * opposite of each operation here. |
| */ |
| switch (tm->op) { |
| case MOD_LOG_KEY_REMOVE_WHILE_FREEING: |
| BUG_ON(tm->slot < n); |
| /* Fallthrough */ |
| case MOD_LOG_KEY_REMOVE_WHILE_MOVING: |
| case MOD_LOG_KEY_REMOVE: |
| btrfs_set_node_key(eb, &tm->key, tm->slot); |
| btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr); |
| btrfs_set_node_ptr_generation(eb, tm->slot, |
| tm->generation); |
| n++; |
| break; |
| case MOD_LOG_KEY_REPLACE: |
| BUG_ON(tm->slot >= n); |
| btrfs_set_node_key(eb, &tm->key, tm->slot); |
| btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr); |
| btrfs_set_node_ptr_generation(eb, tm->slot, |
| tm->generation); |
| break; |
| case MOD_LOG_KEY_ADD: |
| /* if a move operation is needed it's in the log */ |
| n--; |
| break; |
| case MOD_LOG_MOVE_KEYS: |
| o_dst = btrfs_node_key_ptr_offset(tm->slot); |
| o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot); |
| memmove_extent_buffer(eb, o_dst, o_src, |
| tm->move.nr_items * p_size); |
| break; |
| case MOD_LOG_ROOT_REPLACE: |
| /* |
| * this operation is special. for roots, this must be |
| * handled explicitly before rewinding. |
| * for non-roots, this operation may exist if the node |
| * was a root: root A -> child B; then A gets empty and |
| * B is promoted to the new root. in the mod log, we'll |
| * have a root-replace operation for B, a tree block |
| * that is no longer a root. we simply ignore that operation. |
| */ |
| break; |
| } |
| next = rb_next(&tm->node); |
| if (!next) |
| break; |
| tm = rb_entry(next, struct tree_mod_elem, node); |
| if (tm->logical != first_tm->logical) |
| break; |
| } |
| read_unlock(&fs_info->tree_mod_log_lock); |
| btrfs_set_header_nritems(eb, n); |
| } |
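| |
| /* |
| * Rewind examples for the cases above: a logged MOD_LOG_KEY_ADD is |
| * undone by decrementing nritems (dropping the slot again), while a |
| * logged MOD_LOG_KEY_REMOVE is undone by restoring the saved key, |
| * blockptr and generation and incrementing nritems. |
| */ |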
| |
| /* |
| * Called with eb read locked. If the buffer cannot be rewound, the same buffer |
| * is returned. If rewind operations happen, a fresh buffer is returned. The |
| * returned buffer is always read-locked. If the returned buffer is not the |
| * input buffer, the lock on the input buffer is released and the input buffer |
| * is freed (its refcount is decremented). |
| */ |
| static struct extent_buffer * |
| tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path, |
| struct extent_buffer *eb, u64 time_seq) |
| { |
| struct extent_buffer *eb_rewin; |
| struct tree_mod_elem *tm; |
| |
| if (!time_seq) |
| return eb; |
| |
| if (btrfs_header_level(eb) == 0) |
| return eb; |
| |
| tm = tree_mod_log_search(fs_info, eb->start, time_seq); |
| if (!tm) |
| return eb; |
| |
| btrfs_set_path_blocking(path); |
| btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); |
| |
| if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) { |
| BUG_ON(tm->slot != 0); |
| eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start); |
| if (!eb_rewin) { |
| btrfs_tree_read_unlock_blocking(eb); |
| free_extent_buffer(eb); |
| return NULL; |
| } |
| btrfs_set_header_bytenr(eb_rewin, eb->start); |
| btrfs_set_header_backref_rev(eb_rewin, |
| btrfs_header_backref_rev(eb)); |
| btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb)); |
| btrfs_set_header_level(eb_rewin, btrfs_header_level(eb)); |
| } else { |
| eb_rewin = btrfs_clone_extent_buffer(eb); |
| if (!eb_rewin) { |
| btrfs_tree_read_unlock_blocking(eb); |
| free_extent_buffer(eb); |
| return NULL; |
| } |
| } |
| |
| btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK); |
| btrfs_tree_read_unlock_blocking(eb); |
| free_extent_buffer(eb); |
| |
| extent_buffer_get(eb_rewin); |
| btrfs_tree_read_lock(eb_rewin); |
| __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm); |
| WARN_ON(btrfs_header_nritems(eb_rewin) > |
| BTRFS_NODEPTRS_PER_BLOCK(fs_info)); |
| |
| return eb_rewin; |
| } |
| |
| /* |
| * get_old_root() rewinds the state of @root's root node to the given @time_seq |
| * value. If there are no changes, the current root->node is returned. If |
| * anything changed in between, there's a fresh buffer allocated on which the |
| * rewind operations are done. In any case, the returned buffer is read locked. |
| * Returns NULL on error (with no locks held). |
| */ |
| static inline struct extent_buffer * |
| get_old_root(struct btrfs_root *root, u64 time_seq) |
| { |
| struct btrfs_fs_info *fs_info = root->fs_info; |
| struct tree_mod_elem *tm; |
| struct extent_buffer *eb = NULL; |
| struct extent_buffer *eb_root; |
| u64 eb_root_owner = 0; |
| struct extent_buffer *old; |
| struct tree_mod_root *old_root = NULL; |
| u64 old_generation = 0; |
| u64 logical; |
| int level; |
| |
| eb_root = btrfs_read_lock_root_node(root); |
| tm = __tree_mod_log_oldest_root(eb_root, time_seq); |
| if (!tm) |
| return eb_root; |
| |
| if (tm->op == MOD_LOG_ROOT_REPLACE) { |
| old_root = &tm->old_root; |
| old_generation = tm->generation; |
| logical = old_root->logical; |
| level = old_root->level; |
| } else { |
| logical = eb_root->start; |
| level = btrfs_header_level(eb_root); |
| } |
| |
| tm = tree_mod_log_search(fs_info, logical, time_seq); |
| if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) { |
| btrfs_tree_read_unlock(eb_root); |
| free_extent_buffer(eb_root); |
| old = read_tree_block(fs_info, logical, 0, level, NULL); |
| if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) { |
| if (!IS_ERR(old)) |
| free_extent_buffer(old); |
| btrfs_warn(fs_info, |
| "failed to read tree block %llu from get_old_root", |
| logical); |
| } else { |
| eb = btrfs_clone_extent_buffer(old); |
| free_extent_buffer(old); |
| } |
| } else if (old_root) { |
| eb_root_owner = btrfs_header_owner(eb_root); |
| btrfs_tree_read_unlock(eb_root); |
| free_extent_buffer(eb_root); |
| eb = alloc_dummy_extent_buffer(fs_info, logical); |
| } else { |
| btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK); |
| eb = btrfs_clone_extent_buffer(eb_root); |
| btrfs_tree_read_unlock_blocking(eb_root); |
| free_extent_buffer(eb_root); |
| } |
| |
| if (!eb) |
| return NULL; |
| extent_buffer_get(eb); |
| btrfs_tree_read_lock(eb); |
| if (old_root) { |
| btrfs_set_header_bytenr(eb, eb->start); |
| btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); |
| btrfs_set_header_owner(eb, eb_root_owner); |
| btrfs_set_header_level(eb, old_root->level); |
| btrfs_set_header_generation(eb, old_generation); |
| } |
| if (tm) |
| __tree_mod_log_rewind(fs_info, eb, time_seq, tm); |
| else |
| WARN_ON(btrfs_header_level(eb) != 0); |
| WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info)); |
| |
| return eb; |
| } |
| |
| int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq) |
| { |
| struct tree_mod_elem *tm; |
| int level; |
| struct extent_buffer *eb_root = btrfs_root_node(root); |
| |
| tm = __tree_mod_log_oldest_root(eb_root, time_seq); |
| if (tm && tm->op == MOD_LOG_ROOT_REPLACE) { |
| level = tm->old_root.level; |
| } else { |
| level = btrfs_header_level(eb_root); |
| } |
| free_extent_buffer(eb_root); |
| |
| return level; |
| } |
| |
| static inline int should_cow_block(struct btrfs_trans_handle *trans, |
| struct btrfs_root *root, |
| struct extent_buffer *buf) |
| { |
| if (btrfs_is_testing(root->fs_info)) |
| return 0; |
| |
| /* Ensure we can see the FORCE_COW bit */ |
| smp_mb__before_atomic(); |
| |
| /* |
| * We do not need to cow a block if |
| * 1) this block is not created or changed in this transaction; |
| * 2) this block does not belong to TREE_RELOC tree; |
| * 3) the root is not forced COW. |
| * |
| * What is forced COW: |
| * when we create a snapshot while committing the transaction, |
| * after we've finished copying the src root, we must COW the shared |
| * block to ensure metadata consistency. |
| */ |
| if (btrfs_header_generation(buf) == trans->transid && |
| !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) && |
| !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && |
| btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) && |
| !test_bit(BTRFS_ROOT_FORCE_COW, &root->state)) |
| return 0; |
| return 1; |
| } |
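| |
| /* |
| * For example, a block created earlier in this same transaction and |
| * not yet written out passes all the checks above and is modified in |
| * place; the same block touched again after a commit (its generation |
| * is then older than trans->transid) must be COWed first. |
| */ |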
| |
| /* |
| * cows a single block, see __btrfs_cow_block for the real work. |
| * This version of it has extra checks so that a block isn't COWed more than |
| * once per transaction, as long as it hasn't been written yet |
| */ |
| noinline int btrfs_cow_block(struct btrfs_trans_handle *trans, |
| struct btrfs_root *root, struct extent_buffer *buf, |
| struct extent_buffer *parent, int parent_slot, |
| struct extent_buffer **cow_ret) |
| { |
| struct btrfs_fs_info *fs_info = root->fs_info; |
| u64 search_start; |
| int ret; |
| |
| if (trans->transaction != fs_info->running_transaction) |
| WARN(1, KERN_CRIT "trans %llu running %llu\n", |
| trans->transid, |
| fs_info->running_transaction->transid); |
| |
| if (trans->transid != fs_info->generation) |
| WARN(1, KERN_CRIT "trans %llu running fs gen %llu\n", |
| trans->transid, fs_info->generation); |
| |
| if (!should_cow_block(trans, root, buf)) { |
| trans->dirty = true; |
| *cow_ret = buf; |
| return 0; |
| } |
| |
| search_start = buf->start & ~((u64)SZ_1G - 1); |
| |
| if (parent) |
| btrfs_set_lock_blocking(parent); |
| btrfs_set_lock_blocking(buf); |
| |
| ret = __btrfs_cow_block(trans, root, buf, parent, |
| parent_slot, cow_ret, search_start, 0); |
| |
| trace_btrfs_cow_block(root, buf, *cow_ret); |
| |
| return ret; |
| } |
| |
| /* |
| * helper function for defrag to decide if two blocks pointed to by a |
| * node are actually close by |
| */ |
| static int close_blocks(u64 blocknr, u64 other, u32 blocksize) |
| { |
| if (blocknr < other && other - (blocknr + blocksize) < 32768) |
| return 1; |
| if (blocknr > other && blocknr - (other + blocksize) < 32768) |
| return 1; |
| return 0; |
| } |
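| |
| /* |
| * For example, with a 16KiB blocksize, blocks at 0 and 40960 are close |
| * (gap of 40960 - 16384 = 24576, below 32768), while blocks at 0 and |
| * 65536 are not (gap of 49152). |
| */ |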
| |
| /* |
| * compare two keys in a memcmp fashion |
| */ |
| static int comp_keys(const struct btrfs_disk_key *disk, |
| const struct btrfs_key *k2) |
| { |
| struct btrfs_key k1; |
| |
| btrfs_disk_key_to_cpu(&k1, disk); |
| |
| return btrfs_comp_cpu_keys(&k1, k2); |
| } |
| |
| /* |
| * same as comp_keys only with two btrfs_key's |
| */ |
| int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2) |
| { |
| if (k1->objectid > k2->objectid) |
| return 1; |
| if (k1->objectid < k2->objectid) |
| return -1; |
| if (k1->type > k2->type) |
| return 1; |
| if (k1->type < k2->type) |
| return -1; |
| if (k1->offset > k2->offset) |
| return 1; |
| if (k1->offset < k2->offset) |
| return -1; |
| return 0; |
| } |
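| |
| /* |
| * Keys order by objectid, then type, then offset. For example, |
| * (256, BTRFS_INODE_ITEM_KEY, 0) sorts before |
| * (256, BTRFS_DIR_ITEM_KEY, 12345), since BTRFS_INODE_ITEM_KEY (1) is |
| * smaller than BTRFS_DIR_ITEM_KEY (84), regardless of the offsets. |
| */ |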
| |
| /* |
| * this is used by the defrag code to go through all the |
| * leaves pointed to by a node and reallocate them so that |
| * disk order is close to key order |
| */ |
| int btrfs_realloc_node(struct btrfs_trans_handle *trans, |
| struct btrfs_root *root, struct extent_buffer *parent, |
| int start_slot, u64 *last_ret, |
| struct btrfs_key *progress) |
| { |
| struct btrfs_fs_info *fs_info = root->fs_info; |
| struct extent_buffer *cur; |
| u64 blocknr; |
| u64 gen; |
| u64 search_start = *last_ret; |
| u64 last_block = 0; |
| u64 other; |
| u32 parent_nritems; |
| int end_slot; |
| int i; |
| int err = 0; |
| int parent_level; |
| int uptodate; |
| u32 blocksize; |
| int progress_passed = 0; |
| struct btrfs_disk_key disk_key; |
| |
| parent_level = btrfs_header_level(parent); |
| |
| WARN_ON(trans->transaction != fs_info->running_transaction); |
| WARN_ON(trans->transid != fs_info->generation); |
| |
| parent_nritems = btrfs_header_nritems(parent); |
| blocksize = fs_info->nodesize; |
| end_slot = parent_nritems - 1; |
| |
| if (parent_nritems <= 1) |
| return 0; |
| |
| btrfs_set_lock_blocking(parent); |
| |
| for (i = start_slot; i <= end_slot; i++) { |
| struct btrfs_key first_key; |
| int close = 1; |
| |
| btrfs_node_key(parent, &disk_key, i); |
| if (!progress_passed && comp_keys(&disk_key, progress) < 0) |
| continue; |
| |
| progress_passed = 1; |
| blocknr = btrfs_node_blockptr(parent, i); |
| gen = btrfs_node_ptr_generation(parent, i); |
| btrfs_node_key_to_cpu(parent, &first_key, i); |
| if (last_block == 0) |
| last_block = blocknr; |
| |
| if (i > 0) { |
| other = btrfs_node_blockptr(parent, i - 1); |
| close = close_blocks(blocknr, other, blocksize); |
| } |
| if (!close && i < end_slot) { |
| other = btrfs_node_blockptr(parent, i + 1); |
| close = close_blocks(blocknr, other, blocksize); |
| } |
| if (close) { |
| last_block = blocknr; |
| continue; |
| } |
| |
| cur = find_extent_buffer(fs_info, blocknr); |
| if (cur) |
| uptodate = btrfs_buffer_uptodate(cur, gen, 0); |
| else |
| uptodate = 0; |
| if (!cur || !uptodate) { |
| if (!cur) { |
| cur = read_tree_block(fs_info, blocknr, gen, |
| parent_level - 1, |
| &first_key); |
| if (IS_ERR(cur)) { |
| return PTR_ERR(cur); |
| } else if (!extent_buffer_uptodate(cur)) { |
| free_extent_buffer(cur); |
| return -EIO; |
| } |
| } else if (!uptodate) { |
| err = btrfs_read_buffer(cur, gen, |
| parent_level - 1, &first_key); |
| if (err) { |
| free_extent_buffer(cur); |
| return err; |
| } |
| } |
| } |
| if (search_start == 0) |
| search_start = last_block; |
| |
| btrfs_tree_lock(cur); |
| btrfs_set_lock_blocking(cur); |
| err = __btrfs_cow_block(trans, root, cur, parent, i, |
| &cur, search_start, |
| min(16 * blocksize, |
| (end_slot - i) * blocksize)); |
| if (err) { |
| btrfs_tree_unlock(cur); |
| free_extent_buffer(cur); |
| break; |
| } |
| search_start = cur->start; |
| last_block = cur->start; |
| *last_ret = search_start; |
| btrfs_tree_unlock(cur); |
| free_extent_buffer(cur); |
| } |
| return err; |
| } |
| |
| /* |
| * search for a key in the extent_buffer. The items start at offset p, |
| * and they are item_size apart. There are 'max' items in p. |
| * |
| * the slot in the array is returned via slot, and it points to |
| * the place where you would insert key if it is not found in |
| * the array. |
| * |
| * slot may point to max if the key is bigger than all of the keys |
| */ |
| static noinline int generic_bin_search(struct extent_buffer *eb, |
| unsigned long p, int item_size, |
| const struct btrfs_key *key, |
| int max, int *slot) |
| { |
| int low = 0; |
| int high = max; |
| int mid; |
| int ret; |
| struct btrfs_disk_key *tmp = NULL; |
| struct btrfs_disk_key unaligned; |
| unsigned long offset; |
| char *kaddr = NULL; |
| unsigned long map_start = 0; |
| unsigned long map_len = 0; |
| int err; |
| |
| if (low > high) { |
| btrfs_err(eb->fs_info, |
| "%s: low (%d) > high (%d) eb %llu owner %llu level %d", |
| __func__, low, high, eb->start, |
| btrfs_header_owner(eb), btrfs_header_level(eb)); |
| return -EINVAL; |
| } |
| |
| while (low < high) { |
| mid = (low + high) / 2; |
| offset = p + mid * item_size; |
| |
| if (!kaddr || offset < map_start || |
| (offset + sizeof(struct btrfs_disk_key)) > |
| map_start + map_len) { |
| |
| err = map_private_extent_buffer(eb, offset, |
| sizeof(struct btrfs_disk_key), |
| &kaddr, &map_start, &map_len); |
| |
| if (!err) { |
| tmp = (struct btrfs_disk_key *)(kaddr + offset - |
| map_start); |
| } else if (err == 1) { |
| read_extent_buffer(eb, &unaligned, |
| offset, sizeof(unaligned)); |
| tmp = &unaligned; |
| } else { |
| return err; |
| } |
| |
| } else { |
| tmp = (struct btrfs_disk_key *)(kaddr + offset - |
| map_start); |
| } |
| ret = comp_keys(tmp, key); |
| |
| if (ret < 0) |
| low = mid + 1; |
| else if (ret > 0) |
| high = mid; |
| else { |
| *slot = mid; |
| return 0; |
| } |
| } |
| *slot = low; |
| return 1; |
| } |
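| |
| /* |
| * The return convention, illustrated: an exact match yields 0 with |
| * *slot set to the matching index; a key that falls between slots 2 |
| * and 3 yields 1 with *slot == 3, the insertion position; a key |
| * greater than all entries yields 1 with *slot == max. |
| */ |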
| |
| /* |
| * simple bin_search frontend that does the right thing for |
| * leaves vs nodes |
| */ |
| int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key, |
| int level, int *slot) |
| { |
| if (level == 0) |
| return generic_bin_search(eb, |
| offsetof(struct btrfs_leaf, items), |
| sizeof(struct btrfs_item), |
| key, btrfs_header_nritems(eb), |
| slot); |
| else |
| return generic_bin_search(eb, |
| offsetof(struct btrfs_node, ptrs), |
| sizeof(struct btrfs_key_ptr), |
| key, btrfs_header_nritems(eb), |
| slot); |
| } |
| |
| static void root_add_used(struct btrfs_root *root, u32 size) |
| { |
| spin_lock(&root->accounting_lock); |
| btrfs_set_root_used(&root->root_item, |
| btrfs_root_used(&root->root_item) + size); |
| spin_unlock(&root->accounting_lock); |
| } |
| |
| static void root_sub_used(struct btrfs_root *root, u32 size) |
| { |
| spin_lock(&root->accounting_lock); |
| btrfs_set_root_used(&root->root_item, |
| btrfs_root_used(&root->root_item) - size); |
| spin_unlock(&root->accounting_lock); |
| } |
| |
| /* given a node and slot number, this reads the block it points to. The |
| * extent buffer is returned with a reference taken (but unlocked). |
| */ |
| static noinline struct extent_buffer * |
| read_node_slot(struct btrfs_fs_info *fs_info, struct extent_buffer *parent, |
| int slot) |
| { |
| int level = btrfs_header_level(parent); |
| struct extent_buffer *eb; |
| struct btrfs_key first_key; |
| |
| if (slot < 0 || slot >= btrfs_header_nritems(parent)) |
| return ERR_PTR(-ENOENT); |
| |
| BUG_ON(level == 0); |
| |
| btrfs_node_key_to_cpu(parent, &first_key, slot); |
| eb = read_tree_block(fs_info, btrfs_node_blockptr(parent, slot), |
| btrfs_node_ptr_generation(parent, slot), |
| level - 1, &first_key); |
| if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) { |
| free_extent_buffer(eb); |
| eb = ERR_PTR(-EIO); |
| } |
| |
| return eb; |
| } |
| |
| /* |
| * node level balancing, used to make sure nodes are in proper order for |
| * item deletion. We balance from the top down, so we have to make sure |
| * that a deletion won't leave a node completely empty later on. |
| */ |
| static noinline int balance_level(struct btrfs_trans_handle *trans, |
| struct btrfs_root *root, |
| struct btrfs_path *path, int level) |
| { |
| struct btrfs_fs_info *fs_info = root->fs_info; |
| struct extent_buffer *right = NULL; |
| struct extent_buffer *mid; |
| struct extent_buffer *left = NULL; |
| struct extent_buffer *parent = NULL; |
| int ret = 0; |
| int wret; |
| int pslot; |
| int orig_slot = path->slots[level]; |
| u64 orig_ptr; |
| |
| if (level == 0) |
| return 0; |
| |
| mid = path->nodes[level]; |
| |
| WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK && |
| path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING); |
| WARN_ON(btrfs_header_generation(mid) != trans->transid); |
| |
| orig_ptr = btrfs_node_blockptr(mid, orig_slot); |
| |
| if (level < BTRFS_MAX_LEVEL - 1) { |
| parent = path->nodes[level + 1]; |
| pslot = path->slots[level + 1]; |
| } |
| |
| /* |
| * deal with the case where there is only one pointer in the root |
| * by promoting the node below to a root |
| */ |
| if (!parent) { |
| struct extent_buffer *child; |
| |
| if (btrfs_header_nritems(mid) != 1) |
| return 0; |
| |
| /* promote the child to a root */ |
| child = read_node_slot(fs_info, mid, 0); |
| if (IS_ERR(child)) { |
| ret = PTR_ERR(child); |
| btrfs_handle_fs_error(fs_info, ret, NULL); |
| goto enospc; |
| } |
| |
| btrfs_tree_lock(child); |
| btrfs_set_lock_blocking(child); |
| ret = btrfs_cow_block(trans, root, child, mid, 0, &child); |
| if (ret) { |
| btrfs_tree_unlock(child); |
| free_extent_buffer(child); |
| goto enospc; |
| } |
| |
| ret = tree_mod_log_insert_root(root->node, child, 1); |
| BUG_ON(ret < 0); |
| rcu_assign_pointer(root->node, child); |
| |
| add_root_to_dirty_list(root); |
| btrfs_tree_unlock(child); |
| |
| path->locks[level] = 0; |
| path->nodes[level] = NULL; |
| clean_tree_block(fs_info, mid); |
| btrfs_tree_unlock(mid); |
| /* once for the path */ |
| free_extent_buffer(mid); |
| |
| root_sub_used(root, mid->len); |
| btrfs_free_tree_block(trans, root, mid, 0, 1); |
| /* once for the root ptr */ |
| free_extent_buffer_stale(mid); |
| return 0; |
| } |
| if (btrfs_header_nritems(mid) > |
| BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4) |
| return 0; |
| |
| left = read_node_slot(fs_info, parent, pslot - 1); |
| if (IS_ERR(left)) |
| left = NULL; |
| |
| if (left) { |
| btrfs_tree_lock(left); |
| btrfs_set_lock_blocking(left); |
| wret = btrfs_cow_block(trans, root, left, |
| parent, pslot - 1, &left); |
| if (wret) { |
| ret = wret; |
| goto enospc; |
| } |
| } |
| |
| right = read_node_slot(fs_info, parent, pslot + 1); |
| if (IS_ERR(right)) |
| right = NULL; |
| |
| if (right) { |
| btrfs_tree_lock(right); |
| btrfs_set_lock_blocking(right); |
| wret = btrfs_cow_block(trans, root, right, |
| parent, pslot + 1, &right); |
| if (wret) { |
| ret = wret; |
| goto enospc; |
| } |
| } |
| |
| /* first, try to make some room in the middle buffer */ |
| if (left) { |
| orig_slot += btrfs_header_nritems(left); |
| wret = push_node_left(trans, fs_info, left, mid, 1); |
| if (wret < 0) |
| ret = wret; |
| } |
| |
| /* |
| * then try to empty the rightmost buffer into the middle |
| */ |
| if (right) { |
| wret = push_node_left(trans, fs_info, mid, right, 1); |
| if (wret < 0 && wret != -ENOSPC) |
| ret = wret; |
| if (btrfs_header_nritems(right) == 0) { |
| clean_tree_block(fs_info, right); |
| btrfs_tree_unlock(right); |
| del_ptr(root, path, level + 1, pslot + 1); |
| root_sub_used(root, right->len); |
| btrfs_free_tree_block(trans, root, right, 0, 1); |
| free_extent_buffer_stale(right); |
| right = NULL; |
| } else { |
| struct btrfs_disk_key right_key; |
| btrfs_node_key(right, &right_key, 0); |
| ret = tree_mod_log_insert_key(parent, pslot + 1, |
| MOD_LOG_KEY_REPLACE, GFP_NOFS); |
| BUG_ON(ret < 0); |
| btrfs_set_node_key(parent, &right_key, pslot + 1); |
| btrfs_mark_buffer_dirty(parent); |
| } |
| } |
| if (btrfs_header_nritems(mid) == 1) { |
| /* |
| * we're not allowed to leave a node with one item in the |
| * tree during a delete. A deletion from lower in the tree |
| * could try to delete the only pointer in this node. |
| * So, pull some keys from the left. |
| * There has to be a left pointer at this point because |
| * otherwise we would have pulled some pointers from the |
		 * right.
		 */
| if (!left) { |
| ret = -EROFS; |
| btrfs_handle_fs_error(fs_info, ret, NULL); |
| goto enospc; |
| } |
| wret = balance_node_right(trans, fs_info, mid, left); |
| if (wret < 0) { |
| ret = wret; |
| goto enospc; |
| } |
| if (wret == 1) { |
| wret = push_node_left(trans, fs_info, left, mid, 1); |
| if (wret < 0) |
| ret = wret; |
| } |
| BUG_ON(wret == 1); |
| } |
| if (btrfs_header_nritems(mid) == 0) { |
| clean_tree_block(fs_info, mid); |
| btrfs_tree_unlock(mid); |
| del_ptr(root, path, level + 1, pslot); |
| root_sub_used(root, mid->len); |
| btrfs_free_tree_block(trans, root, mid, 0, 1); |
| free_extent_buffer_stale(mid); |
| mid = NULL; |
| } else { |
| /* update the parent key to reflect our changes */ |
| struct btrfs_disk_key mid_key; |
| btrfs_node_key(mid, &mid_key, 0); |
| ret = tree_mod_log_insert_key(parent, pslot, |
| MOD_LOG_KEY_REPLACE, GFP_NOFS); |
| BUG_ON(ret < 0); |
| btrfs_set_node_key(parent, &mid_key, pslot); |
| btrfs_mark_buffer_dirty(parent); |
| } |
| |
| /* update the path */ |
| if (left) { |
| if (btrfs_header_nritems(left) > orig_slot) { |
| extent_buffer_get(left); |
| /* left was locked after cow */ |
| path->nodes[level] = left; |
| path->slots[level + 1] -= 1; |
| path->slots[level] = orig_slot; |
| if (mid) { |
| btrfs_tree_unlock(mid); |
| free_extent_buffer(mid); |
| } |
| } else { |
| orig_slot -= btrfs_header_nritems(left); |
| path->slots[level] = orig_slot; |
| } |
| } |
| /* double check we haven't messed things up */ |
| if (orig_ptr != |
| btrfs_node_blockptr(path->nodes[level], path->slots[level])) |
| BUG(); |
| enospc: |
| if (right) { |
| btrfs_tree_unlock(right); |
| free_extent_buffer(right); |
| } |
| if (left) { |
| if (path->nodes[level] != left) |
| btrfs_tree_unlock(left); |
| free_extent_buffer(left); |
| } |
| return ret; |
| } |
| |
/*
 * Node balancing for insertion. Here we only split or push nodes around
 * when they are completely full. This is also done top down, so we
 * have to be pessimistic.
 *
 * Returns 0 if some room was made in a sibling, 1 if nothing could be
 * pushed.
 */
| static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans, |
| struct btrfs_root *root, |
| struct btrfs_path *path, int level) |
| { |
| struct btrfs_fs_info *fs_info = root->fs_info; |
| struct extent_buffer *right = NULL; |
| struct extent_buffer *mid; |
| struct extent_buffer *left = NULL; |
| struct extent_buffer *parent = NULL; |
| int ret = 0; |
| int wret; |
| int pslot; |
| int orig_slot = path->slots[level]; |
| |
| if (level == 0) |
| return 1; |
| |
| mid = path->nodes[level]; |
| WARN_ON(btrfs_header_generation(mid) != trans->transid); |
| |
| if (level < BTRFS_MAX_LEVEL - 1) { |
| parent = path->nodes[level + 1]; |
| pslot = path->slots[level + 1]; |
| } |
| |
| if (!parent) |
| return 1; |
| |
| left = read_node_slot(fs_info, parent, pslot - 1); |
| if (IS_ERR(left)) |
| left = NULL; |
| |
| /* first, try to make some room in the middle buffer */ |
| if (left) { |
| u32 left_nr; |
| |
| btrfs_tree_lock(left); |
| btrfs_set_lock_blocking(left); |
| |
| left_nr = btrfs_header_nritems(left); |
| if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) { |
| wret = 1; |
| } else { |
| ret = btrfs_cow_block(trans, root, left, parent, |
| pslot - 1, &left); |
| if (ret) |
| wret = 1; |
| else { |
| wret = push_node_left(trans, fs_info, |
| left, mid, 0); |
| } |
| } |
| if (wret < 0) |
| ret = wret; |
| if (wret == 0) { |
| struct btrfs_disk_key disk_key; |
| orig_slot += left_nr; |
| btrfs_node_key(mid, &disk_key, 0); |
| ret = tree_mod_log_insert_key(parent, pslot, |
| MOD_LOG_KEY_REPLACE, GFP_NOFS); |
| BUG_ON(ret < 0); |
| btrfs_set_node_key(parent, &disk_key, pslot); |
| btrfs_mark_buffer_dirty(parent); |
| if (btrfs_header_nritems(left) > orig_slot) { |
| path->nodes[level] = left; |
| path->slots[level + 1] -= 1; |
| path->slots[level] = orig_slot; |
| btrfs_tree_unlock(mid); |
| free_extent_buffer(mid); |
| } else { |
| orig_slot -= |
| btrfs_header_nritems(left); |
| path->slots[level] = orig_slot; |
| btrfs_tree_unlock(left); |
| free_extent_buffer(left); |
| } |
| return 0; |
| } |
| btrfs_tree_unlock(left); |
| free_extent_buffer(left); |
| } |
| right = read_node_slot(fs_info, parent, pslot + 1); |
| if (IS_ERR(right)) |
| right = NULL; |
| |
| /* |
| * then try to empty the right most buffer into the middle |
| */ |
| if (right) { |
| u32 right_nr; |
| |
| btrfs_tree_lock(right); |
| btrfs_set_lock_blocking(right); |
| |
| right_nr = btrfs_header_nritems(right); |
| if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) { |
| wret = 1; |
| } else { |
| ret = btrfs_cow_block(trans, root, right, |
| parent, pslot + 1, |
| &right); |
| if (ret) |
| wret = 1; |
| else { |
| wret = balance_node_right(trans, fs_info, |
| right, mid); |
| } |
| } |
| if (wret < 0) |
| ret = wret; |
| if (wret == 0) { |
| struct btrfs_disk_key disk_key; |
| |
| btrfs_node_key(right, &disk_key, 0); |
| ret = tree_mod_log_insert_key(parent, pslot + 1, |
| MOD_LOG_KEY_REPLACE, GFP_NOFS); |
| BUG_ON(ret < 0); |
| btrfs_set_node_key(parent, &disk_key, pslot + 1); |
| btrfs_mark_buffer_dirty(parent); |
| |
| if (btrfs_header_nritems(mid) <= orig_slot) { |
| path->nodes[level] = right; |
| path->slots[level + 1] += 1; |
| path->slots[level] = orig_slot - |
| btrfs_header_nritems(mid); |
| btrfs_tree_unlock(mid); |
| free_extent_buffer(mid); |
| } else { |
| btrfs_tree_unlock(right); |
| free_extent_buffer(right); |
| } |
| return 0; |
| } |
| btrfs_tree_unlock(right); |
| free_extent_buffer(right); |
| } |
| return 1; |
| } |
| |
| /* |
| * readahead one full node of leaves, finding things that are close |
| * to the block in 'slot', and triggering ra on them. |
| */ |
| static void reada_for_search(struct btrfs_fs_info *fs_info, |
| struct btrfs_path *path, |
| int level, int slot, u64 objectid) |
| { |
| struct extent_buffer *node; |
| struct btrfs_disk_key disk_key; |
| u32 nritems; |
| u64 search; |
| u64 target; |
| u64 nread = 0; |
| struct extent_buffer *eb; |
| u32 nr; |
| u32 blocksize; |
| u32 nscan = 0; |
| |
| if (level != 1) |
| return; |
| |
| if (!path->nodes[level]) |
| return; |
| |
| node = path->nodes[level]; |
| |
| search = btrfs_node_blockptr(node, slot); |
| blocksize = fs_info->nodesize; |
| eb = find_extent_buffer(fs_info, search); |
| if (eb) { |
| free_extent_buffer(eb); |
| return; |
| } |
| |
| target = search; |
| |
| nritems = btrfs_header_nritems(node); |
| nr = slot; |
| |
| while (1) { |
| if (path->reada == READA_BACK) { |
| if (nr == 0) |
| break; |
| nr--; |
| } else if (path->reada == READA_FORWARD) { |
| nr++; |
| if (nr >= nritems) |
| break; |
| } |
| if (path->reada == READA_BACK && objectid) { |
| btrfs_node_key(node, &disk_key, nr); |
| if (btrfs_disk_key_objectid(&disk_key) != objectid) |
| break; |
| } |
| search = btrfs_node_blockptr(node, nr); |
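		/*
		 * only readahead blocks within 64K of the target, and stop
		 * once 64K has been read ahead or 32 slots were scanned
		 */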
| if ((search <= target && target - search <= 65536) || |
| (search > target && search - target <= 65536)) { |
| readahead_tree_block(fs_info, search); |
| nread += blocksize; |
| } |
| nscan++; |
| if ((nread > 65536 || nscan > 32)) |
| break; |
| } |
| } |
| |
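/*
 * readahead the left and right siblings of the node at @level, since a
 * following balance at that level is likely to read them
 */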
| static noinline void reada_for_balance(struct btrfs_fs_info *fs_info, |
| struct btrfs_path *path, int level) |
| { |
| int slot; |
| int nritems; |
| struct extent_buffer *parent; |
| struct extent_buffer *eb; |
| u64 gen; |
| u64 block1 = 0; |
| u64 block2 = 0; |
| |
| parent = path->nodes[level + 1]; |
| if (!parent) |
| return; |
| |
| nritems = btrfs_header_nritems(parent); |
| slot = path->slots[level + 1]; |
| |
| if (slot > 0) { |
| block1 = btrfs_node_blockptr(parent, slot - 1); |
| gen = btrfs_node_ptr_generation(parent, slot - 1); |
| eb = find_extent_buffer(fs_info, block1); |
| /* |
| * if we get -eagain from btrfs_buffer_uptodate, we |
| * don't want to return eagain here. That will loop |
| * forever |
| */ |
| if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0) |
| block1 = 0; |
| free_extent_buffer(eb); |
| } |
| if (slot + 1 < nritems) { |
| block2 = btrfs_node_blockptr(parent, slot + 1); |
| gen = btrfs_node_ptr_generation(parent, slot + 1); |
| eb = find_extent_buffer(fs_info, block2); |
| if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0) |
| block2 = 0; |
| free_extent_buffer(eb); |
| } |
| |
| if (block1) |
| readahead_tree_block(fs_info, block1); |
| if (block2) |
| readahead_tree_block(fs_info, block2); |
| } |
| |
| |
| /* |
| * when we walk down the tree, it is usually safe to unlock the higher layers |
| * in the tree. The exceptions are when our path goes through slot 0, because |
| * operations on the tree might require changing key pointers higher up in the |
| * tree. |
| * |
| * callers might also have set path->keep_locks, which tells this code to keep |
| * the lock if the path points to the last slot in the block. This is part of |
| * walking through the tree, and selecting the next slot in the higher block. |
| * |
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. So
 * if lowest_unlock is 1, level 0 won't be unlocked.
| */ |
| static noinline void unlock_up(struct btrfs_path *path, int level, |
| int lowest_unlock, int min_write_lock_level, |
| int *write_lock_level) |
| { |
| int i; |
| int skip_level = level; |
| int no_skips = 0; |
| struct extent_buffer *t; |
| |
| for (i = level; i < BTRFS_MAX_LEVEL; i++) { |
| if (!path->nodes[i]) |
| break; |
| if (!path->locks[i]) |
| break; |
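		/*
		 * while every level so far sits in slot 0 (or, with
		 * keep_locks, in the last slot), a change below may still
		 * need to update keys at this level, so keep the lock
		 */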
| if (!no_skips && path->slots[i] == 0) { |
| skip_level = i + 1; |
| continue; |
| } |
| if (!no_skips && path->keep_locks) { |
| u32 nritems; |
| t = path->nodes[i]; |
| nritems = btrfs_header_nritems(t); |
| if (nritems < 1 || path->slots[i] >= nritems - 1) { |
| skip_level = i + 1; |
| continue; |
| } |
| } |
| if (skip_level < i && i >= lowest_unlock) |
| no_skips = 1; |
| |
| t = path->nodes[i]; |
| if (i >= lowest_unlock && i > skip_level) { |
| btrfs_tree_unlock_rw(t, path->locks[i]); |
| path->locks[i] = 0; |
| if (write_lock_level && |
| i > min_write_lock_level && |
| i <= *write_lock_level) { |
| *write_lock_level = i - 1; |
| } |
| } |
| } |
| } |
| |
| /* |
| * This releases any locks held in the path starting at level and |
| * going all the way up to the root. |
| * |
| * btrfs_search_slot will keep the lock held on higher nodes in a few |
| * corner cases, such as COW of the block at slot zero in the node. This |
| * ignores those rules, and it should only be called when there are no |
| * more updates to be done higher up in the tree. |
| */ |
| noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level) |
| { |
| int i; |
| |
| if (path->keep_locks) |
| return; |
| |
| for (i = level; i < BTRFS_MAX_LEVEL; i++) { |
| if (!path->nodes[i]) |
| continue; |
| if (!path->locks[i]) |
| continue; |
| btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]); |
| path->locks[i] = 0; |
| } |
| } |
| |
| /* |
| * helper function for btrfs_search_slot. The goal is to find a block |
| * in cache without setting the path to blocking. If we find the block |
| * we return zero and the path is unchanged. |
| * |
| * If we can't find the block, we set the path blocking and do some |
| * reada. -EAGAIN is returned and the search must be repeated. |
| */ |
| static int |
| read_block_for_search(struct btrfs_root *root, struct btrfs_path *p, |
| struct extent_buffer **eb_ret, int level, int slot, |
| const struct btrfs_key *key) |
| { |
| struct btrfs_fs_info *fs_info = root->fs_info; |
| u64 blocknr; |
| u64 gen; |
| struct extent_buffer *b = *eb_ret; |
| struct extent_buffer *tmp; |
| struct btrfs_key first_key; |
| int ret; |
| int parent_level; |
| |
| blocknr = btrfs_node_blockptr(b, slot); |
| gen = btrfs_node_ptr_generation(b, slot); |
| parent_level = btrfs_header_level(b); |
| btrfs_node_key_to_cpu(b, &first_key, slot); |
| |
| tmp = find_extent_buffer(fs_info, blocknr); |
| if (tmp) { |
| /* first we do an atomic uptodate check */ |
| if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) { |
| /* |
| * Do extra check for first_key, eb can be stale due to |
| * being cached, read from scrub, or have multiple |
| * parents (shared tree blocks). |
| */ |
| if (btrfs_verify_level_key(fs_info, tmp, |
| parent_level - 1, &first_key, gen)) { |
| free_extent_buffer(tmp); |
| return -EUCLEAN; |
| } |
| *eb_ret = tmp; |
| return 0; |
| } |
| |
		/*
		 * The pages were up to date, but we failed the generation
		 * number check.  Do a full read for the generation number
		 * that is correct.  We must do this without dropping locks
		 * so we can trust our generation number.
		 */
| btrfs_set_path_blocking(p); |
| |
| /* now we're allowed to do a blocking uptodate check */ |
| ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key); |
| if (!ret) { |
| *eb_ret = tmp; |
| return 0; |
| } |
| free_extent_buffer(tmp); |
| btrfs_release_path(p); |
| return -EIO; |
| } |
| |
| /* |
| * reduce lock contention at high levels |
| * of the btree by dropping locks before |
| * we read. Don't release the lock on the current |
| * level because we need to walk this node to figure |
| * out which blocks to read. |
| */ |
| btrfs_unlock_up_safe(p, level + 1); |
| btrfs_set_path_blocking(p); |
| |
| if (p->reada != READA_NONE) |
| reada_for_search(fs_info, p, level, slot, key->objectid); |
| |
| ret = -EAGAIN; |
| tmp = read_tree_block(fs_info, blocknr, gen, parent_level - 1, |
| &first_key); |
| if (!IS_ERR(tmp)) { |
| /* |
| * If the read above didn't mark this buffer up to date, |
| * it will never end up being up to date. Set ret to EIO now |
| * and give up so that our caller doesn't loop forever |
| * on our EAGAINs. |
| */ |
| if (!extent_buffer_uptodate(tmp)) |
| ret = -EIO; |
| free_extent_buffer(tmp); |
| } else { |
| ret = PTR_ERR(tmp); |
| } |
| |
| btrfs_release_path(p); |
| return ret; |
| } |
| |
| /* |
| * helper function for btrfs_search_slot. This does all of the checks |
| * for node-level blocks and does any balancing required based on |
| * the ins_len. |
| * |
| * If no extra work was required, zero is returned. If we had to |
| * drop the path, -EAGAIN is returned and btrfs_search_slot must |
 * start over.
| */ |
| static int |
| setup_nodes_for_search(struct btrfs_trans_handle *trans, |
| struct btrfs_root *root, struct btrfs_path *p, |
| struct extent_buffer *b, int level, int ins_len, |
| int *write_lock_level) |
| { |
| struct btrfs_fs_info *fs_info = root->fs_info; |
| int ret; |
| |
| if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >= |
| BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) { |
| int sret; |
| |
| if (*write_lock_level < level + 1) { |
| *write_lock_level = level + 1; |
| btrfs_release_path(p); |
| goto again; |
| } |
| |
| btrfs_set_path_blocking(p); |
| reada_for_balance(fs_info, p, level); |
| sret = split_node(trans, root, p, level); |
| btrfs_clear_path_blocking(p, NULL, 0); |
| |
| BUG_ON(sret > 0); |
| if (sret) { |
| ret = sret; |
| goto done; |
| } |
| b = p->nodes[level]; |
| } else if (ins_len < 0 && btrfs_header_nritems(b) < |
| BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) { |
| int sret; |
| |
| if (*write_lock_level < level + 1) { |
| *write_lock_level = level + 1; |
| btrfs_release_path(p); |
| goto again; |
| } |
| |
| btrfs_set_path_blocking(p); |
| reada_for_balance(fs_info, p, level); |
| sret = balance_level(trans, root, p, level); |
| btrfs_clear_path_blocking(p, NULL, 0); |
| |
| if (sret) { |
| ret = sret; |
| goto done; |
| } |
| b = p->nodes[level]; |
| if (!b) { |
| btrfs_release_path(p); |
| goto again; |
| } |
| BUG_ON(btrfs_header_nritems(b) == 1); |
| } |
| return 0; |
| |
| again: |
| ret = -EAGAIN; |
| done: |
| return ret; |
| } |
| |
| static void key_search_validate(struct extent_buffer *b, |
| const struct btrfs_key *key, |
| int level) |
| { |
| #ifdef CONFIG_BTRFS_ASSERT |
| struct btrfs_disk_key disk_key; |
| |
| btrfs_cpu_key_to_disk(&disk_key, key); |
| |
| if (level == 0) |
| ASSERT(!memcmp_extent_buffer(b, &disk_key, |
| offsetof(struct btrfs_leaf, items[0].key), |
| sizeof(disk_key))); |
| else |
| ASSERT(!memcmp_extent_buffer(b, &disk_key, |
| offsetof(struct btrfs_node, ptrs[0].key), |
| sizeof(disk_key))); |
| #endif |
| } |
| |
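/*
 * Thin wrapper around btrfs_bin_search.  *prev_cmp carries the result of
 * the search at the previous (higher) level: once a level has matched the
 * key exactly, every lower level must hold it at slot 0, so the binary
 * search can be skipped.
 */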
| static int key_search(struct extent_buffer *b, const struct btrfs_key *key, |
| int level, int *prev_cmp, int *slot) |
| { |
| if (*prev_cmp != 0) { |
| *prev_cmp = btrfs_bin_search(b, key, level, slot); |
| return *prev_cmp; |
| } |
| |
| key_search_validate(b, key, level); |
| *slot = 0; |
| |
| return 0; |
| } |
| |
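/*
 * Search @fs_root for an item with the given objectid, type and offset,
 * returning its key in @found_key.  Returns 0 if an item with a matching
 * objectid and type was found (the offset may differ), 1 if not, and < 0
 * on error.
 */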
| int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path, |
| u64 iobjectid, u64 ioff, u8 key_type, |
| struct btrfs_key *found_key) |
| { |
| int ret; |
| struct btrfs_key key; |
| struct extent_buffer *eb; |
| |
| ASSERT(path); |
| ASSERT(found_key); |
| |
| key.type = key_type; |
| key.objectid = iobjectid; |
| key.offset = ioff; |
| |
| ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0); |
| if (ret < 0) |
| return ret; |
| |
| eb = path->nodes[0]; |
| if (ret && path->slots[0] >= btrfs_header_nritems(eb)) { |
| ret = btrfs_next_leaf(fs_root, path); |
| if (ret) |
| return ret; |
| eb = path->nodes[0]; |
| } |
| |
| btrfs_item_key_to_cpu(eb, found_key, path->slots[0]); |
| if (found_key->type != key.type || |
| found_key->objectid != key.objectid) |
| return 1; |
| |
| return 0; |
| } |
| |
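/*
 * Look up and lock the root node of @root for a search.  Commit root and
 * skip_locking searches take no lock at all; otherwise we prefer a read
 * lock, and trade it for a write lock when the root's level is at or
 * below @write_lock_level.
 */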
| static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root, |
| struct btrfs_path *p, |
| int write_lock_level) |
| { |
| struct btrfs_fs_info *fs_info = root->fs_info; |
| struct extent_buffer *b; |
| int root_lock; |
| int level = 0; |
| |
| /* We try very hard to do read locks on the root */ |
| root_lock = BTRFS_READ_LOCK; |
| |
| if (p->search_commit_root) { |
| /* |
| * The commit roots are read only so we always do read locks, |
| * and we always must hold the commit_root_sem when doing |
| * searches on them, the only exception is send where we don't |
| * want to block transaction commits for a long time, so |
| * we need to clone the commit root in order to avoid races |
| * with transaction commits that create a snapshot of one of |
| * the roots used by a send operation. |
| */ |
| if (p->need_commit_sem) { |
| down_read(&fs_info->commit_root_sem); |
| b = btrfs_clone_extent_buffer(root->commit_root); |
| up_read(&fs_info->commit_root_sem); |
| if (!b) |
| return ERR_PTR(-ENOMEM); |
| |
| } else { |
| b = root->commit_root; |
| extent_buffer_get(b); |
| } |
| level = btrfs_header_level(b); |
| /* |
| * Ensure that all callers have set skip_locking when |
| * p->search_commit_root = 1. |
| */ |
| ASSERT(p->skip_locking == 1); |
| |
| goto out; |
| } |
| |
| if (p->skip_locking) { |
| b = btrfs_root_node(root); |
| level = btrfs_header_level(b); |
| goto out; |
| } |
| |
| /* |
| * If the level is set to maximum, we can skip trying to get the read |
| * lock. |
| */ |
| if (write_lock_level < BTRFS_MAX_LEVEL) { |
| /* |
| * We don't know the level of the root node until we actually |
| * have it read locked |
| */ |
| b = btrfs_read_lock_root_node(root); |
| level = btrfs_header_level(b); |
| if (level > write_lock_level) |
| goto out; |
| |
| /* Whoops, must trade for write lock */ |
| btrfs_tree_read_unlock(b); |
| free_extent_buffer(b); |
| } |
| |
| b = btrfs_lock_root_node(root); |
| root_lock = BTRFS_WRITE_LOCK; |
| |
| /* The level might have changed, check again */ |
| level = btrfs_header_level(b); |
| |
| out: |
| p->nodes[level] = b; |
| if (!p->skip_locking) |
| p->locks[level] = root_lock; |
| /* |
| * Callers are responsible for dropping b's references. |
| */ |
| return b; |
| } |
| |
| |
| /* |
| * btrfs_search_slot - look for a key in a tree and perform necessary |
| * modifications to preserve tree invariants. |
| * |
 * @trans: Handle of transaction, used when modifying the tree
 * @root: The root node of the tree
 * @key: The key we are looking for
 * @p: Holds all btree nodes along the search path
 * @ins_len: Indicates purpose of search: for inserts it is the total size
 * of the item(s) to insert (data plus item headers), for deletions it's
 * -1, and 0 for plain searches
| * @cow: boolean should CoW operations be performed. Must always be 1 |
| * when modifying the tree. |
| * |
| * If @ins_len > 0, nodes and leaves will be split as we walk down the tree. |
| * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible) |
| * |
| * If @key is found, 0 is returned and you can find the item in the leaf level |
| * of the path (level 0) |
| * |
| * If @key isn't found, 1 is returned and the leaf level of the path (level 0) |
| * points to the slot where it should be inserted |
| * |
| * If an error is encountered while searching the tree a negative error number |
| * is returned |
| */ |
| int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root, |
| const struct btrfs_key *key, struct btrfs_path *p, |
| int ins_len, int cow) |
| { |
| struct btrfs_fs_info *fs_info = root->fs_info; |
| struct extent_buffer *b; |
| int slot; |
| int ret; |
| int err; |
| int level; |
| int lowest_unlock = 1; |
| /* everything at write_lock_level or lower must be write locked */ |
| int write_lock_level = 0; |
| u8 lowest_level = 0; |
| int min_write_lock_level; |
| int prev_cmp; |
| |
| lowest_level = p->lowest_level; |
| WARN_ON(lowest_level && ins_len > 0); |
| WARN_ON(p->nodes[0] != NULL); |
| BUG_ON(!cow && ins_len); |
| |
| if (ins_len < 0) { |
| lowest_unlock = 2; |
| |
		/*
		 * when we are removing items, we might have to go up to
		 * level two as we update tree pointers.  Make sure we keep
		 * write locks on those levels as well.
		 */
| write_lock_level = 2; |
| } else if (ins_len > 0) { |
| /* |
| * for inserting items, make sure we have a write lock on |
| * level 1 so we can update keys |
| */ |
| write_lock_level = 1; |
| } |
| |
| if (!cow) |
| write_lock_level = -1; |
| |
| if (cow && (p->keep_locks || p->lowest_level)) |
| write_lock_level = BTRFS_MAX_LEVEL; |
| |
| min_write_lock_level = write_lock_level; |
| |
| again: |
| prev_cmp = -1; |
| b = btrfs_search_slot_get_root(root, p, write_lock_level); |
| if (IS_ERR(b)) { |
| ret = PTR_ERR(b); |
| goto done; |
| } |
| |
| while (b) { |
| level = btrfs_header_level(b); |
| |
| /* |
| * setup the path here so we can release it under lock |
| * contention with the cow code |
| */ |
| if (cow) { |
| bool last_level = (level == (BTRFS_MAX_LEVEL - 1)); |
| |
| /* |
| * if we don't really need to cow this block |
| * then we don't want to set the path blocking, |
| * so we test it here |
| */ |
| if (!should_cow_block(trans, root, b)) { |
| trans->dirty = true; |
| goto cow_done; |
| } |
| |
| /* |
| * must have write locks on this node and the |
| * parent |
| */ |
| if (level > write_lock_level || |
| (level + 1 > write_lock_level && |
| level + 1 < BTRFS_MAX_LEVEL && |
| p->nodes[level + 1])) { |
| write_lock_level = level + 1; |
| btrfs_release_path(p); |
| goto again; |
| } |
| |
| btrfs_set_path_blocking(p); |
| if (last_level) |
| err = btrfs_cow_block(trans, root, b, NULL, 0, |
| &b); |
| else |
| err = btrfs_cow_block(trans, root, b, |
| p->nodes[level + 1], |
| p->slots[level + 1], &b); |
| if (err) { |
| ret = err; |
| goto done; |
| } |
| } |
| cow_done: |
| p->nodes[level] = b; |
| btrfs_clear_path_blocking(p, NULL, 0); |
| |
| /* |
| * we have a lock on b and as long as we aren't changing |
		 * the tree, there is no way for the items in b to change.
| * It is safe to drop the lock on our parent before we |
| * go through the expensive btree search on b. |
| * |
| * If we're inserting or deleting (ins_len != 0), then we might |
| * be changing slot zero, which may require changing the parent. |
| * So, we can't drop the lock until after we know which slot |
| * we're operating on. |
| */ |
| if (!ins_len && !p->keep_locks) { |
| int u = level + 1; |
| |
| if (u < BTRFS_MAX_LEVEL && p->locks[u]) { |
| btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]); |
| p->locks[u] = 0; |
| } |
| } |
| |
| ret = key_search(b, key, level, &prev_cmp, &slot); |
| if (ret < 0) |
| goto done; |
| |
| if (level != 0) { |
| int dec = 0; |
| if (ret && slot > 0) { |
| dec = 1; |
| slot -= 1; |
| } |
| p->slots[level] = slot; |
| err = setup_nodes_for_search(trans, root, p, b, level, |
| ins_len, &write_lock_level); |
| if (err == -EAGAIN) |
| goto again; |
| if (err) { |
| ret = err; |
| goto done; |
| } |
| b = p->nodes[level]; |
| slot = p->slots[level]; |
| |
| /* |
| * slot 0 is special, if we change the key |
| * we have to update the parent pointer |
| * which means we must have a write lock |
| * on the parent |
| */ |
| if (slot == 0 && ins_len && |
| write_lock_level < level + 1) { |
| write_lock_level = level + 1; |
| btrfs_release_path(p); |
| goto again; |
| } |
| |
| unlock_up(p, level, lowest_unlock, |
| min_write_lock_level, &write_lock_level); |
| |
| if (level == lowest_level) { |
| if (dec) |
| p->slots[level]++; |
| goto done; |
| } |
| |
| err = read_block_for_search(root, p, &b, level, |
| slot, key); |
| if (err == -EAGAIN) |
| goto again; |
| if (err) { |
| ret = err; |
| goto done; |
| } |
| |
| if (!p->skip_locking) { |
| level = btrfs_header_level(b); |
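				/*
				 * try to take the lock without blocking
				 * first, and only make the whole path
				 * blocking (which can schedule) when the
				 * uncontended attempt fails
				 */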
| if (level <= write_lock_level) { |
| err = btrfs_try_tree_write_lock(b); |
| if (!err) { |
| btrfs_set_path_blocking(p); |
| btrfs_tree_lock(b); |
| btrfs_clear_path_blocking(p, b, |
| BTRFS_WRITE_LOCK); |
| } |
| p->locks[level] = BTRFS_WRITE_LOCK; |
| } else { |
| err = btrfs_tree_read_lock_atomic(b); |
| if (!err) { |
| btrfs_set_path_blocking(p); |
| btrfs_tree_read_lock(b); |
| btrfs_clear_path_blocking(p, b, |
| BTRFS_READ_LOCK); |
| } |
| p->locks[level] = BTRFS_READ_LOCK; |
| } |
| p->nodes[level] = b; |
| } |
| } else { |
| p->slots[level] = slot; |
| if (ins_len > 0 && |
| btrfs_leaf_free_space(fs_info, b) < ins_len) { |
| if (write_lock_level < 1) { |
| write_lock_level = 1; |
| btrfs_release_path(p); |
| goto again; |
| } |
| |
| btrfs_set_path_blocking(p); |
| err = split_leaf(trans, root, key, |
| p, ins_len, ret == 0); |
| btrfs_clear_path_blocking(p, NULL, 0); |
| |
| BUG_ON(err > 0); |
| if (err) { |
| ret = err; |
| goto done; |
| } |
| } |
| if (!p->search_for_split) |
| unlock_up(p, level, lowest_unlock, |
| min_write_lock_level, &write_lock_level); |
| goto done; |
| } |
| } |
| ret = 1; |
| done: |
| /* |
| * we don't really know what they plan on doing with the path |
| * from here on, so for now just mark it as blocking |
| */ |
| if (!p->leave_spinning) |
| btrfs_set_path_blocking(p); |
| if (ret < 0 && !p->skip_release_on_error) |
| btrfs_release_path(p); |
| return ret; |
| } |
| |
| /* |
| * Like btrfs_search_slot, this looks for a key in the given tree. It uses the |
| * current state of the tree together with the operations recorded in the tree |
| * modification log to search for the key in a previous version of this tree, as |
| * denoted by the time_seq parameter. |
| * |
| * Naturally, there is no support for insert, delete or cow operations. |
| * |
| * The resulting path and return value will be set up as if we called |
| * btrfs_search_slot at that point in time with ins_len and cow both set to 0. |
| */ |
| int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, |
| struct btrfs_path *p, u64 time_seq) |
| { |
| struct btrfs_fs_info *fs_info = root->fs_info; |
| struct extent_buffer *b; |
| int slot; |
| int ret; |
| int err; |
| int level; |
| int lowest_unlock = 1; |
| u8 lowest_level = 0; |
| int prev_cmp = -1; |
| |
| lowest_level = p->lowest_level; |
| WARN_ON(p->nodes[0] != NULL); |
| |
| if (p->search_commit_root) { |
| BUG_ON(time_seq); |
| return btrfs_search_slot(NULL, root, key, p, 0, 0); |
| } |
| |
| again: |
| b = get_old_root(root, time_seq); |
| if (!b) { |
| ret = -EIO; |
| goto done; |
| } |
| level = btrfs_header_level(b); |
| p->locks[level] = BTRFS_READ_LOCK; |
| |
| while (b) { |
| level = btrfs_header_level(b); |
| p->nodes[level] = b; |
| btrfs_clear_path_blocking(p, NULL, 0); |
| |
| /* |
| * we have a lock on b and as long as we aren't changing |
		 * the tree, there is no way for the items in b to change.
| * It is safe to drop the lock on our parent before we |
| * go through the expensive btree search on b. |
| */ |
| btrfs_unlock_up_safe(p, level + 1); |
| |
| /* |
| * Since we can unwind ebs we want to do a real search every |
| * time. |
| */ |
| prev_cmp = -1; |
| ret = key_search(b, key, level, &prev_cmp, &slot); |
| |
| if (level != 0) { |
| int dec = 0; |
| if (ret && slot > 0) { |
| dec = 1; |
| slot -= 1; |
| } |
| p->slots[level] = slot; |
| unlock_up(p, level, lowest_unlock, 0, NULL); |
| |
| if (level == lowest_level) { |
| if (dec) |
| p->slots[level]++; |
| goto done; |
| } |
| |
| err = read_block_for_search(root, p, &b, level, |
| slot, key); |
| if (err == -EAGAIN) |
| goto again; |
| if (err) { |
| ret = err; |
| goto done; |
| } |
| |
| level = btrfs_header_level(b); |
| err = btrfs_tree_read_lock_atomic(b); |
| if (!err) { |
| btrfs_set_path_blocking(p); |
| btrfs_tree_read_lock(b); |
| btrfs_clear_path_blocking(p, b, |
| BTRFS_READ_LOCK); |
| } |
| b = tree_mod_log_rewind(fs_info, p, b, time_seq); |
| if (!b) { |
|