author      Yan Zheng <zheng.yan@oracle.com>        2009-06-10 10:45:14 -0400
committer   Chris Mason <chris.mason@oracle.com>    2009-06-10 11:29:46 -0400
commit      5d4f98a28c7d334091c1b7744f48a1acdd2a4ae0 (patch)
tree        c611d7d824cbcdb777dd2d8e33e2ed1c5df8a9c6 /fs/btrfs/ctree.c
parent      5c939df56c3ea018b58e5aa76181284c2053d699 (diff)
download    linux-5d4f98a28c7d334091c1b7744f48a1acdd2a4ae0.tar.bz2
Btrfs: Mixed back reference (FORWARD ROLLING FORMAT CHANGE)
This commit introduces a new kind of back reference for btrfs metadata. Once a filesystem has been mounted with this commit, IT WILL NO LONGER BE MOUNTABLE BY OLDER KERNELS.

When a tree block in a subvolume tree is cow'd, the reference counts of all extents it points to are increased by one. At transaction commit time, the old root of the subvolume is recorded in a "dead root" data structure, and the btree it points to is later walked, dropping reference counts and freeing any blocks where the reference count goes to 0. The increments done during cow and the decrements done after commit cancel out, and the walk is a very expensive way to go about freeing the blocks that are no longer referenced by the new btree root. This commit reduces the transaction overhead by avoiding the need for dead root records. When a non-shared tree block is cow'd, we free the old block at once, and the new block inherits the old block's references. When a tree block with reference count > 1 is cow'd, we increase the reference counts of all extents the new block points to by one, and decrease the old block's reference count by one.

This dead tree avoidance code removes the need to modify the reference counts of lower-level extents when a non-shared tree block is cow'd. But we still need to update the back ref for all pointers in the block, because the location of the block is recorded in the back ref item. We can solve this by introducing a new type of back ref. The new back ref provides information about the pointer's key, its level, and the tree in which the pointer lives. This information allows us to find the pointer by searching the tree. The shortcoming of the new back ref is that it only works for pointers in tree blocks referenced by their owner trees. This is mostly a problem for snapshots, where resolving one of these fuzzy back references would be O(number_of_snapshots) and quite slow. The solution used here is to use the fuzzy back references in the common case where a given tree block is only referenced by one root, and to use the full back references when multiple roots have a reference on a given block.

This commit adds a per-subvolume red-black tree to keep track of cached inodes. The red-black tree helps the balancing code find cached inodes whose inode numbers fall within a given range.

This commit improves the balancing code by introducing several data structures to keep the state of balancing. The most important one is the back ref cache. It caches how the upper-level tree blocks are referenced. This greatly reduces the overhead of checking back refs. The improved balancing code scales significantly better with a large number of snapshots.

This is a very large commit and was written in a number of pieces. But they depend heavily on the disk format change and were squashed together to make sure git bisect didn't end up in a bad state wrt space balancing or the format change.

Signed-off-by: Yan Zheng <zheng.yan@oracle.com>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
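To make the cow-time reference rule above concrete, here is a minimal standalone sketch of the accounting described in this message. It is illustrative only: struct block, its refs/child_refs fields and cow_block() are stand-ins invented for this example, not btrfs code; the real logic lives in update_ref_for_cow() in the diff below.

/*
 * Illustrative sketch only. 'struct block', 'refs' and 'child_refs' are
 * made-up names that model the rule from the commit message; they do not
 * exist in btrfs.
 */
#include <stdio.h>

struct block {
	unsigned long long refs;       /* references held on this tree block        */
	unsigned long long child_refs; /* refs held on each extent it points to     */
};

/* cow 'buf' into the freshly allocated copy 'cow' */
static void cow_block(struct block *buf, struct block *cow)
{
	if (buf->refs == 1) {
		/* non-shared block: free the old block at once and let the
		 * new block inherit the old block's references */
		cow->child_refs = buf->child_refs;
		buf->refs = 0;          /* old block is freed */
	} else {
		/* shared block: bump every extent the new block points to by
		 * one and drop one reference from the old block */
		cow->child_refs = buf->child_refs + 1;
		buf->refs--;
	}
	cow->refs = 1;
}

int main(void)
{
	struct block shared = { .refs = 2, .child_refs = 1 };
	struct block copy = { 0 };

	cow_block(&shared, &copy);
	/* prints: old refs=1, new child refs=2 */
	printf("old refs=%llu, new child refs=%llu\n",
	       shared.refs, copy.child_refs);
	return 0;
}

In the non-shared case nothing below the old block has to be touched, which is exactly why the expensive dead-root walk becomes unnecessary.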
Diffstat (limited to 'fs/btrfs/ctree.c')
-rw-r--r--  fs/btrfs/ctree.c | 685
 1 file changed, 316 insertions(+), 369 deletions(-)
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index fedf8b9f03a2..2b960278a2f9 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -197,14 +197,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
u32 nritems;
int ret = 0;
int level;
- struct btrfs_root *new_root;
-
- new_root = kmalloc(sizeof(*new_root), GFP_NOFS);
- if (!new_root)
- return -ENOMEM;
-
- memcpy(new_root, root, sizeof(*new_root));
- new_root->root_key.objectid = new_root_objectid;
+ struct btrfs_disk_key disk_key;
WARN_ON(root->ref_cows && trans->transid !=
root->fs_info->running_transaction->transid);
@@ -212,28 +205,37 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
level = btrfs_header_level(buf);
nritems = btrfs_header_nritems(buf);
+ if (level == 0)
+ btrfs_item_key(buf, &disk_key, 0);
+ else
+ btrfs_node_key(buf, &disk_key, 0);
- cow = btrfs_alloc_free_block(trans, new_root, buf->len, 0,
- new_root_objectid, trans->transid,
- level, buf->start, 0);
- if (IS_ERR(cow)) {
- kfree(new_root);
+ cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
+ new_root_objectid, &disk_key, level,
+ buf->start, 0);
+ if (IS_ERR(cow))
return PTR_ERR(cow);
- }
copy_extent_buffer(cow, buf, 0, 0, cow->len);
btrfs_set_header_bytenr(cow, cow->start);
btrfs_set_header_generation(cow, trans->transid);
- btrfs_set_header_owner(cow, new_root_objectid);
- btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN);
+ btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
+ btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
+ BTRFS_HEADER_FLAG_RELOC);
+ if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
+ btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
+ else
+ btrfs_set_header_owner(cow, new_root_objectid);
write_extent_buffer(cow, root->fs_info->fsid,
(unsigned long)btrfs_header_fsid(cow),
BTRFS_FSID_SIZE);
WARN_ON(btrfs_header_generation(buf) > trans->transid);
- ret = btrfs_inc_ref(trans, new_root, buf, cow, NULL);
- kfree(new_root);
+ if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
+ ret = btrfs_inc_ref(trans, root, cow, 1);
+ else
+ ret = btrfs_inc_ref(trans, root, cow, 0);
if (ret)
return ret;
@@ -244,6 +246,125 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
}
/*
+ * check if the tree block can be shared by multiple trees
+ */
+int btrfs_block_can_be_shared(struct btrfs_root *root,
+ struct extent_buffer *buf)
+{
+ /*
+ * Tree blocks not in reference counted trees and tree roots
+ * are never shared. If a block was allocated after the last
+ * snapshot and the block was not allocated by tree relocation,
+ * we know the block is not shared.
+ */
+ if (root->ref_cows &&
+ buf != root->node && buf != root->commit_root &&
+ (btrfs_header_generation(buf) <=
+ btrfs_root_last_snapshot(&root->root_item) ||
+ btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
+ return 1;
+#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
+ if (root->ref_cows &&
+ btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
+ return 1;
+#endif
+ return 0;
+}
+
+static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct extent_buffer *buf,
+ struct extent_buffer *cow)
+{
+ u64 refs;
+ u64 owner;
+ u64 flags;
+ u64 new_flags = 0;
+ int ret;
+
+ /*
+ * Backrefs update rules:
+ *
+ * Always use full backrefs for extent pointers in tree block
+ * allocated by tree relocation.
+ *
+ * If a shared tree block is no longer referenced by its owner
+ * tree (btrfs_header_owner(buf) == root->root_key.objectid),
+ * use full backrefs for extent pointers in tree block.
+ *
+ * If a tree block is being relocated
+ * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
+ * use full backrefs for extent pointers in tree block.
+ * The reason for this is that some operations (such as drop tree)
+ * are only allowed for blocks that use full backrefs.
+ */
+
+ if (btrfs_block_can_be_shared(root, buf)) {
+ ret = btrfs_lookup_extent_info(trans, root, buf->start,
+ buf->len, &refs, &flags);
+ BUG_ON(ret);
+ BUG_ON(refs == 0);
+ } else {
+ refs = 1;
+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
+ btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
+ flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
+ else
+ flags = 0;
+ }
+
+ owner = btrfs_header_owner(buf);
+ BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
+ !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
+
+ if (refs > 1) {
+ if ((owner == root->root_key.objectid ||
+ root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
+ !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
+ ret = btrfs_inc_ref(trans, root, buf, 1);
+ BUG_ON(ret);
+
+ if (root->root_key.objectid ==
+ BTRFS_TREE_RELOC_OBJECTID) {
+ ret = btrfs_dec_ref(trans, root, buf, 0);
+ BUG_ON(ret);
+ ret = btrfs_inc_ref(trans, root, cow, 1);
+ BUG_ON(ret);
+ }
+ new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
+ } else {
+
+ if (root->root_key.objectid ==
+ BTRFS_TREE_RELOC_OBJECTID)
+ ret = btrfs_inc_ref(trans, root, cow, 1);
+ else
+ ret = btrfs_inc_ref(trans, root, cow, 0);
+ BUG_ON(ret);
+ }
+ if (new_flags != 0) {
+ ret = btrfs_set_disk_extent_flags(trans, root,
+ buf->start,
+ buf->len,
+ new_flags, 0);
+ BUG_ON(ret);
+ }
+ } else {
+ if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
+ if (root->root_key.objectid ==
+ BTRFS_TREE_RELOC_OBJECTID)
+ ret = btrfs_inc_ref(trans, root, cow, 1);
+ else
+ ret = btrfs_inc_ref(trans, root, cow, 0);
+ BUG_ON(ret);
+ ret = btrfs_dec_ref(trans, root, buf, 1);
+ BUG_ON(ret);
+ }
+ clean_tree_block(trans, root, buf);
+ }
+ return 0;
+}
+
+/*
* does the dirty work in cow of a single block. The parent block (if
* supplied) is updated to point to the new cow copy. The new buffer is marked
* dirty and returned locked. If you modify the block it needs to be marked
@@ -262,34 +383,39 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
struct extent_buffer **cow_ret,
u64 search_start, u64 empty_size)
{
- u64 parent_start;
+ struct btrfs_disk_key disk_key;
struct extent_buffer *cow;
- u32 nritems;
- int ret = 0;
int level;
int unlock_orig = 0;
+ u64 parent_start;
if (*cow_ret == buf)
unlock_orig = 1;
btrfs_assert_tree_locked(buf);
- if (parent)
- parent_start = parent->start;
- else
- parent_start = 0;
-
WARN_ON(root->ref_cows && trans->transid !=
root->fs_info->running_transaction->transid);
WARN_ON(root->ref_cows && trans->transid != root->last_trans);
level = btrfs_header_level(buf);
- nritems = btrfs_header_nritems(buf);
- cow = btrfs_alloc_free_block(trans, root, buf->len,
- parent_start, root->root_key.objectid,
- trans->transid, level,
- search_start, empty_size);
+ if (level == 0)
+ btrfs_item_key(buf, &disk_key, 0);
+ else
+ btrfs_node_key(buf, &disk_key, 0);
+
+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
+ if (parent)
+ parent_start = parent->start;
+ else
+ parent_start = 0;
+ } else
+ parent_start = 0;
+
+ cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
+ root->root_key.objectid, &disk_key,
+ level, search_start, empty_size);
if (IS_ERR(cow))
return PTR_ERR(cow);
@@ -298,83 +424,53 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
copy_extent_buffer(cow, buf, 0, 0, cow->len);
btrfs_set_header_bytenr(cow, cow->start);
btrfs_set_header_generation(cow, trans->transid);
- btrfs_set_header_owner(cow, root->root_key.objectid);
- btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN);
+ btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
+ btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
+ BTRFS_HEADER_FLAG_RELOC);
+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
+ btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
+ else
+ btrfs_set_header_owner(cow, root->root_key.objectid);
write_extent_buffer(cow, root->fs_info->fsid,
(unsigned long)btrfs_header_fsid(cow),
BTRFS_FSID_SIZE);
- WARN_ON(btrfs_header_generation(buf) > trans->transid);
- if (btrfs_header_generation(buf) != trans->transid) {
- u32 nr_extents;
- ret = btrfs_inc_ref(trans, root, buf, cow, &nr_extents);
- if (ret)
- return ret;
-
- ret = btrfs_cache_ref(trans, root, buf, nr_extents);
- WARN_ON(ret);
- } else if (btrfs_header_owner(buf) == BTRFS_TREE_RELOC_OBJECTID) {
- /*
- * There are only two places that can drop reference to
- * tree blocks owned by living reloc trees, one is here,
- * the other place is btrfs_drop_subtree. In both places,
- * we check reference count while tree block is locked.
- * Furthermore, if reference count is one, it won't get
- * increased by someone else.
- */
- u32 refs;
- ret = btrfs_lookup_extent_ref(trans, root, buf->start,
- buf->len, &refs);
- BUG_ON(ret);
- if (refs == 1) {
- ret = btrfs_update_ref(trans, root, buf, cow,
- 0, nritems);
- clean_tree_block(trans, root, buf);
- } else {
- ret = btrfs_inc_ref(trans, root, buf, cow, NULL);
- }
- BUG_ON(ret);
- } else {
- ret = btrfs_update_ref(trans, root, buf, cow, 0, nritems);
- if (ret)
- return ret;
- clean_tree_block(trans, root, buf);
- }
-
- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
- ret = btrfs_reloc_tree_cache_ref(trans, root, cow, buf->start);
- WARN_ON(ret);
- }
+ update_ref_for_cow(trans, root, buf, cow);
if (buf == root->node) {
WARN_ON(parent && parent != buf);
+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
+ btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
+ parent_start = buf->start;
+ else
+ parent_start = 0;
spin_lock(&root->node_lock);
root->node = cow;
extent_buffer_get(cow);
spin_unlock(&root->node_lock);
- if (buf != root->commit_root) {
- btrfs_free_extent(trans, root, buf->start,
- buf->len, buf->start,
- root->root_key.objectid,
- btrfs_header_generation(buf),
- level, 1);
- }
+ btrfs_free_extent(trans, root, buf->start, buf->len,
+ parent_start, root->root_key.objectid,
+ level, 0);
free_extent_buffer(buf);
add_root_to_dirty_list(root);
} else {
+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
+ parent_start = parent->start;
+ else
+ parent_start = 0;
+
+ WARN_ON(trans->transid != btrfs_header_generation(parent));
btrfs_set_node_blockptr(parent, parent_slot,
cow->start);
- WARN_ON(trans->transid == 0);
btrfs_set_node_ptr_generation(parent, parent_slot,
trans->transid);
btrfs_mark_buffer_dirty(parent);
- WARN_ON(btrfs_header_generation(parent) != trans->transid);
btrfs_free_extent(trans, root, buf->start, buf->len,
- parent_start, btrfs_header_owner(parent),
- btrfs_header_generation(parent), level, 1);
+ parent_start, root->root_key.objectid,
+ level, 0);
}
if (unlock_orig)
btrfs_tree_unlock(buf);
@@ -384,6 +480,18 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
return 0;
}
+static inline int should_cow_block(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct extent_buffer *buf)
+{
+ if (btrfs_header_generation(buf) == trans->transid &&
+ !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
+ !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
+ btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
+ return 0;
+ return 1;
+}
+
/*
* cows a single block, see __btrfs_cow_block for the real work.
* This version of it has extra checks so that a block isn't cow'd more than
@@ -411,9 +519,7 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
WARN_ON(1);
}
- if (btrfs_header_generation(buf) == trans->transid &&
- btrfs_header_owner(buf) == root->root_key.objectid &&
- !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
+ if (!should_cow_block(trans, root, buf)) {
*cow_ret = buf;
return 0;
}
@@ -469,7 +575,7 @@ static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
/*
* same as comp_keys only with two btrfs_key's
*/
-static int comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
+int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
if (k1->objectid > k2->objectid)
return 1;
@@ -845,6 +951,12 @@ static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
return -1;
}
+int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
+ int level, int *slot)
+{
+ return bin_search(eb, key, level, slot);
+}
+
/* given a node and slot number, this reads the blocks it points to. The
* extent buffer is returned with a reference taken (but unlocked).
* NULL is returned on error.
@@ -921,13 +1033,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
root->node = child;
spin_unlock(&root->node_lock);
- ret = btrfs_update_extent_ref(trans, root, child->start,
- child->len,
- mid->start, child->start,
- root->root_key.objectid,
- trans->transid, level - 1);
- BUG_ON(ret);
-
add_root_to_dirty_list(root);
btrfs_tree_unlock(child);
@@ -938,9 +1043,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
/* once for the path */
free_extent_buffer(mid);
ret = btrfs_free_extent(trans, root, mid->start, mid->len,
- mid->start, root->root_key.objectid,
- btrfs_header_generation(mid),
- level, 1);
+ 0, root->root_key.objectid, level, 1);
/* once for the root ptr */
free_extent_buffer(mid);
return ret;
@@ -998,7 +1101,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
ret = wret;
if (btrfs_header_nritems(right) == 0) {
u64 bytenr = right->start;
- u64 generation = btrfs_header_generation(parent);
u32 blocksize = right->len;
clean_tree_block(trans, root, right);
@@ -1010,9 +1112,9 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
if (wret)
ret = wret;
wret = btrfs_free_extent(trans, root, bytenr,
- blocksize, parent->start,
- btrfs_header_owner(parent),
- generation, level, 1);
+ blocksize, 0,
+ root->root_key.objectid,
+ level, 0);
if (wret)
ret = wret;
} else {
@@ -1047,7 +1149,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
}
if (btrfs_header_nritems(mid) == 0) {
/* we've managed to empty the middle node, drop it */
- u64 root_gen = btrfs_header_generation(parent);
u64 bytenr = mid->start;
u32 blocksize = mid->len;
@@ -1059,9 +1160,8 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
if (wret)
ret = wret;
wret = btrfs_free_extent(trans, root, bytenr, blocksize,
- parent->start,
- btrfs_header_owner(parent),
- root_gen, level, 1);
+ 0, root->root_key.objectid,
+ level, 0);
if (wret)
ret = wret;
} else {
@@ -1437,7 +1537,7 @@ noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
int i;
- if (path->keep_locks || path->lowest_level)
+ if (path->keep_locks)
return;
for (i = level; i < BTRFS_MAX_LEVEL; i++) {
@@ -1614,10 +1714,17 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
lowest_unlock = 2;
again:
- if (p->skip_locking)
- b = btrfs_root_node(root);
- else
- b = btrfs_lock_root_node(root);
+ if (p->search_commit_root) {
+ b = root->commit_root;
+ extent_buffer_get(b);
+ if (!p->skip_locking)
+ btrfs_tree_lock(b);
+ } else {
+ if (p->skip_locking)
+ b = btrfs_root_node(root);
+ else
+ b = btrfs_lock_root_node(root);
+ }
while (b) {
level = btrfs_header_level(b);
@@ -1638,11 +1745,9 @@ again:
* then we don't want to set the path blocking,
* so we test it here
*/
- if (btrfs_header_generation(b) == trans->transid &&
- btrfs_header_owner(b) == root->root_key.objectid &&
- !btrfs_header_flag(b, BTRFS_HEADER_FLAG_WRITTEN)) {
+ if (!should_cow_block(trans, root, b))
goto cow_done;
- }
+
btrfs_set_path_blocking(p);
wret = btrfs_cow_block(trans, root, b,
@@ -1764,138 +1869,6 @@ done:
return ret;
}
-int btrfs_merge_path(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_key *node_keys,
- u64 *nodes, int lowest_level)
-{
- struct extent_buffer *eb;
- struct extent_buffer *parent;
- struct btrfs_key key;
- u64 bytenr;
- u64 generation;
- u32 blocksize;
- int level;
- int slot;
- int key_match;
- int ret;
-
- eb = btrfs_lock_root_node(root);
- ret = btrfs_cow_block(trans, root, eb, NULL, 0, &eb);
- BUG_ON(ret);
-
- btrfs_set_lock_blocking(eb);
-
- parent = eb;
- while (1) {
- level = btrfs_header_level(parent);
- if (level == 0 || level <= lowest_level)
- break;
-
- ret = bin_search(parent, &node_keys[lowest_level], level,
- &slot);
- if (ret && slot > 0)
- slot--;
-
- bytenr = btrfs_node_blockptr(parent, slot);
- if (nodes[level - 1] == bytenr)
- break;
-
- blocksize = btrfs_level_size(root, level - 1);
- generation = btrfs_node_ptr_generation(parent, slot);
- btrfs_node_key_to_cpu(eb, &key, slot);
- key_match = !memcmp(&key, &node_keys[level - 1], sizeof(key));
-
- if (generation == trans->transid) {
- eb = read_tree_block(root, bytenr, blocksize,
- generation);
- btrfs_tree_lock(eb);
- btrfs_set_lock_blocking(eb);
- }
-
- /*
- * if node keys match and node pointer hasn't been modified
- * in the running transaction, we can merge the path. for
- * blocks owened by reloc trees, the node pointer check is
- * skipped, this is because these blocks are fully controlled
- * by the space balance code, no one else can modify them.
- */
- if (!nodes[level - 1] || !key_match ||
- (generation == trans->transid &&
- btrfs_header_owner(eb) != BTRFS_TREE_RELOC_OBJECTID)) {
- if (level == 1 || level == lowest_level + 1) {
- if (generation == trans->transid) {
- btrfs_tree_unlock(eb);
- free_extent_buffer(eb);
- }
- break;
- }
-
- if (generation != trans->transid) {
- eb = read_tree_block(root, bytenr, blocksize,
- generation);
- btrfs_tree_lock(eb);
- btrfs_set_lock_blocking(eb);
- }
-
- ret = btrfs_cow_block(trans, root, eb, parent, slot,
- &eb);
- BUG_ON(ret);
-
- if (root->root_key.objectid ==
- BTRFS_TREE_RELOC_OBJECTID) {
- if (!nodes[level - 1]) {
- nodes[level - 1] = eb->start;
- memcpy(&node_keys[level - 1], &key,
- sizeof(node_keys[0]));
- } else {
- WARN_ON(1);
- }
- }
-
- btrfs_tree_unlock(parent);
- free_extent_buffer(parent);
- parent = eb;
- continue;
- }
-
- btrfs_set_node_blockptr(parent, slot, nodes[level - 1]);
- btrfs_set_node_ptr_generation(parent, slot, trans->transid);
- btrfs_mark_buffer_dirty(parent);
-
- ret = btrfs_inc_extent_ref(trans, root,
- nodes[level - 1],
- blocksize, parent->start,
- btrfs_header_owner(parent),
- btrfs_header_generation(parent),
- level - 1);
- BUG_ON(ret);
-
- /*
- * If the block was created in the running transaction,
- * it's possible this is the last reference to it, so we
- * should drop the subtree.
- */
- if (generation == trans->transid) {
- ret = btrfs_drop_subtree(trans, root, eb, parent);
- BUG_ON(ret);
- btrfs_tree_unlock(eb);
- free_extent_buffer(eb);
- } else {
- ret = btrfs_free_extent(trans, root, bytenr,
- blocksize, parent->start,
- btrfs_header_owner(parent),
- btrfs_header_generation(parent),
- level - 1, 1);
- BUG_ON(ret);
- }
- break;
- }
- btrfs_tree_unlock(parent);
- free_extent_buffer(parent);
- return 0;
-}
-
/*
* adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
@@ -2021,9 +1994,6 @@ static int push_node_left(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(src);
btrfs_mark_buffer_dirty(dst);
- ret = btrfs_update_ref(trans, root, src, dst, dst_nritems, push_items);
- BUG_ON(ret);
-
return ret;
}
@@ -2083,9 +2053,6 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(src);
btrfs_mark_buffer_dirty(dst);
- ret = btrfs_update_ref(trans, root, src, dst, 0, push_items);
- BUG_ON(ret);
-
return ret;
}
@@ -2105,7 +2072,6 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
struct extent_buffer *c;
struct extent_buffer *old;
struct btrfs_disk_key lower_key;
- int ret;
BUG_ON(path->nodes[level]);
BUG_ON(path->nodes[level-1] != root->node);
@@ -2117,16 +2083,17 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
btrfs_node_key(lower, &lower_key, 0);
c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
- root->root_key.objectid, trans->transid,
+ root->root_key.objectid, &lower_key,
level, root->node->start, 0);
if (IS_ERR(c))
return PTR_ERR(c);
- memset_extent_buffer(c, 0, 0, root->nodesize);
+ memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
btrfs_set_header_nritems(c, 1);
btrfs_set_header_level(c, level);
btrfs_set_header_bytenr(c, c->start);
btrfs_set_header_generation(c, trans->transid);
+ btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
btrfs_set_header_owner(c, root->root_key.objectid);
write_extent_buffer(c, root->fs_info->fsid,
@@ -2151,12 +2118,6 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
root->node = c;
spin_unlock(&root->node_lock);
- ret = btrfs_update_extent_ref(trans, root, lower->start,
- lower->len, lower->start, c->start,
- root->root_key.objectid,
- trans->transid, level - 1);
- BUG_ON(ret);
-
/* the super has an extra ref to root->node */
free_extent_buffer(old);
@@ -2244,20 +2205,21 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
}
c_nritems = btrfs_header_nritems(c);
+ mid = (c_nritems + 1) / 2;
+ btrfs_node_key(c, &disk_key, mid);
- split = btrfs_alloc_free_block(trans, root, root->nodesize,
- path->nodes[level + 1]->start,
+ split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
root->root_key.objectid,
- trans->transid, level, c->start, 0);
+ &disk_key, level, c->start, 0);
if (IS_ERR(split))
return PTR_ERR(split);
- btrfs_set_header_flags(split, btrfs_header_flags(c));
+ memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
btrfs_set_header_level(split, btrfs_header_level(c));
btrfs_set_header_bytenr(split, split->start);
btrfs_set_header_generation(split, trans->transid);
+ btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
btrfs_set_header_owner(split, root->root_key.objectid);
- btrfs_set_header_flags(split, 0);
write_extent_buffer(split, root->fs_info->fsid,
(unsigned long)btrfs_header_fsid(split),
BTRFS_FSID_SIZE);
@@ -2265,7 +2227,6 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
(unsigned long)btrfs_header_chunk_tree_uuid(split),
BTRFS_UUID_SIZE);
- mid = (c_nritems + 1) / 2;
copy_extent_buffer(split, c,
btrfs_node_key_ptr_offset(0),
@@ -2278,16 +2239,12 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(c);
btrfs_mark_buffer_dirty(split);
- btrfs_node_key(split, &disk_key, 0);
wret = insert_ptr(trans, root, path, &disk_key, split->start,
path->slots[level + 1] + 1,
level + 1);
if (wret)
ret = wret;
- ret = btrfs_update_ref(trans, root, c, split, 0, c_nritems - mid);
- BUG_ON(ret);
-
if (path->slots[level] >= mid) {
path->slots[level] -= mid;
btrfs_tree_unlock(c);
@@ -2360,7 +2317,6 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
u32 right_nritems;
u32 data_end;
u32 this_item_size;
- int ret;
if (empty)
nr = 0;
@@ -2473,9 +2429,6 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(left);
btrfs_mark_buffer_dirty(right);
- ret = btrfs_update_ref(trans, root, left, right, 0, push_items);
- BUG_ON(ret);
-
btrfs_item_key(right, &disk_key, 0);
btrfs_set_node_key(upper, &disk_key, slot + 1);
btrfs_mark_buffer_dirty(upper);
@@ -2720,10 +2673,6 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
if (right_nritems)
btrfs_mark_buffer_dirty(right);
- ret = btrfs_update_ref(trans, root, right, left,
- old_left_nritems, push_items);
- BUG_ON(ret);
-
btrfs_item_key(right, &disk_key, 0);
wret = fixup_low_keys(trans, root, path, &disk_key, 1);
if (wret)
@@ -2880,9 +2829,6 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(l);
BUG_ON(path->slots[0] != slot);
- ret = btrfs_update_ref(trans, root, l, right, 0, nritems);
- BUG_ON(ret);
-
if (mid <= slot) {
btrfs_tree_unlock(path->nodes[0]);
free_extent_buffer(path->nodes[0]);
@@ -2911,6 +2857,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
struct btrfs_path *path, int data_size,
int extend)
{
+ struct btrfs_disk_key disk_key;
struct extent_buffer *l;
u32 nritems;
int mid;
@@ -2918,7 +2865,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
struct extent_buffer *right;
int ret = 0;
int wret;
- int double_split;
+ int split;
int num_doubles = 0;
/* first try to make some room by pushing left and right */
@@ -2945,16 +2892,53 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
return ret;
}
again:
- double_split = 0;
+ split = 1;
l = path->nodes[0];
slot = path->slots[0];
nritems = btrfs_header_nritems(l);
mid = (nritems + 1) / 2;
- right = btrfs_alloc_free_block(trans, root, root->leafsize,
- path->nodes[1]->start,
+ if (mid <= slot) {
+ if (nritems == 1 ||
+ leaf_space_used(l, mid, nritems - mid) + data_size >
+ BTRFS_LEAF_DATA_SIZE(root)) {
+ if (slot >= nritems) {
+ split = 0;
+ } else {
+ mid = slot;
+ if (mid != nritems &&
+ leaf_space_used(l, mid, nritems - mid) +
+ data_size > BTRFS_LEAF_DATA_SIZE(root)) {
+ split = 2;
+ }
+ }
+ }
+ } else {
+ if (leaf_space_used(l, 0, mid) + data_size >
+ BTRFS_LEAF_DATA_SIZE(root)) {
+ if (!extend && data_size && slot == 0) {
+ split = 0;
+ } else if ((extend || !data_size) && slot == 0) {
+ mid = 1;
+ } else {
+ mid = slot;
+ if (mid != nritems &&
+ leaf_space_used(l, mid, nritems - mid) +
+ data_size > BTRFS_LEAF_DATA_SIZE(root)) {
+ split = 2;
+ }
+ }
+ }
+ }
+
+ if (split == 0)
+ btrfs_cpu_key_to_disk(&disk_key, ins_key);
+ else
+ btrfs_item_key(l, &disk_key, mid);
+
+ right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
root->root_key.objectid,
- trans->transid, 0, l->start, 0);
+ &disk_key, 0, l->start, 0);
if (IS_ERR(right)) {
BUG_ON(1);
return PTR_ERR(right);
@@ -2963,6 +2947,7 @@ again:
memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
btrfs_set_header_bytenr(right, right->start);
btrfs_set_header_generation(right, trans->transid);
+ btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
btrfs_set_header_owner(right, root->root_key.objectid);
btrfs_set_header_level(right, 0);
write_extent_buffer(right, root->fs_info->fsid,
@@ -2973,79 +2958,47 @@ again:
(unsigned long)btrfs_header_chunk_tree_uuid(right),
BTRFS_UUID_SIZE);
- if (mid <= slot) {
- if (nritems == 1 ||
- leaf_space_used(l, mid, nritems - mid) + data_size >
- BTRFS_LEAF_DATA_SIZE(root)) {
- if (slot >= nritems) {
- struct btrfs_disk_key disk_key;
-
- btrfs_cpu_key_to_disk(&disk_key, ins_key);
- btrfs_set_header_nritems(right, 0);
- wret = insert_ptr(trans, root, path,
- &disk_key, right->start,
- path->slots[1] + 1, 1);
- if (wret)
- ret = wret;
+ if (split == 0) {
+ if (mid <= slot) {
+ btrfs_set_header_nritems(right, 0);
+ wret = insert_ptr(trans, root, path,
+ &disk_key, right->start,
+ path->slots[1] + 1, 1);
+ if (wret)
+ ret = wret;
- btrfs_tree_unlock(path->nodes[0]);
- free_extent_buffer(path->nodes[0]);
- path->nodes[0] = right;
- path->slots[0] = 0;
- path->slots[1] += 1;
- btrfs_mark_buffer_dirty(right);
- return ret;
- }
- mid = slot;
- if (mid != nritems &&
- leaf_space_used(l, mid, nritems - mid) +
- data_size > BTRFS_LEAF_DATA_SIZE(root)) {
- double_split = 1;
- }
- }
- } else {
- if (leaf_space_used(l, 0, mid) + data_size >
- BTRFS_LEAF_DATA_SIZE(root)) {
- if (!extend && data_size && slot == 0) {
- struct btrfs_disk_key disk_key;
-
- btrfs_cpu_key_to_disk(&disk_key, ins_key);
- btrfs_set_header_nritems(right, 0);
- wret = insert_ptr(trans, root, path,
- &disk_key,
- right->start,
- path->slots[1], 1);
+ btrfs_tree_unlock(path->nodes[0]);
+ free_extent_buffer(path->nodes[0]);
+ path->nodes[0] = right;
+ path->slots[0] = 0;
+ path->slots[1] += 1;
+ } else {
+ btrfs_set_header_nritems(right, 0);
+ wret = insert_ptr(trans, root, path,
+ &disk_key,
+ right->start,
+ path->slots[1], 1);
+ if (wret)
+ ret = wret;
+ btrfs_tree_unlock(path->nodes[0]);
+ free_extent_buffer(path->nodes[0]);
+ path->nodes[0] = right;
+ path->slots[0] = 0;
+ if (path->slots[1] == 0) {
+ wret = fixup_low_keys(trans, root,
+ path, &disk_key, 1);
if (wret)
ret = wret;
- btrfs_tree_unlock(path->nodes[0]);
- free_extent_buffer(path->nodes[0]);
- path->nodes[0] = right;
- path->slots[0] = 0;
- if (path->slots[1] == 0) {
- wret = fixup_low_keys(trans, root,
- path, &disk_key, 1);
- if (wret)
- ret = wret;
- }
- btrfs_mark_buffer_dirty(right);
- return ret;
- } else if ((extend || !data_size) && slot == 0) {
- mid = 1;
- } else {
- mid = slot;
- if (mid != nritems &&
- leaf_space_used(l, mid, nritems - mid) +
- data_size > BTRFS_LEAF_DATA_SIZE(root)) {
- double_split = 1;
- }
}
}
+ btrfs_mark_buffer_dirty(right);
+ return ret;
}
ret = copy_for_split(trans, root, path, l, right, slot, mid, nritems);
BUG_ON(ret);
- if (double_split) {
+ if (split == 2) {
BUG_ON(num_doubles != 0);
num_doubles++;
goto again;
@@ -3447,7 +3400,7 @@ int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
/* figure out how many keys we can insert in here */
total_data = data_size[0];
for (i = 1; i < nr; i++) {
- if (comp_cpu_keys(&found_key, cpu_key + i) <= 0)
+ if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
break;
total_data += data_size[i];
}
@@ -3745,9 +3698,7 @@ static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
/*
* a helper function to delete the leaf pointed to by path->slots[1] and
- * path->nodes[1]. bytenr is the node block pointer, but since the callers
- * already know it, it is faster to have them pass it down than to
- * read it out of the node again.
+ * path->nodes[1].
*
* This deletes the pointer in path->nodes[1] and frees the leaf
* block extent. zero is returned if it all worked out, < 0 otherwise.
@@ -3755,15 +3706,14 @@ static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
* The path must have already been setup for deleting the leaf, including
* all the proper balancing. path->nodes[1] must be locked.
*/
-noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path, u64 bytenr)
+static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct extent_buffer *leaf)
{
int ret;
- u64 root_gen = btrfs_header_generation(path->nodes[1]);
- u64 parent_start = path->nodes[1]->start;
- u64 parent_owner = btrfs_header_owner(path->nodes[1]);
+ WARN_ON(btrfs_header_generation(leaf) != trans->transid);
ret = del_ptr(trans, root, path, 1, path->slots[1]);
if (ret)
return ret;
@@ -3774,10 +3724,8 @@ noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
*/
btrfs_unlock_up_safe(path, 0);
- ret = btrfs_free_extent(trans, root, bytenr,
- btrfs_level_size(root, 0),
- parent_start, parent_owner,
- root_gen, 0, 1);
+ ret = btrfs_free_extent(trans, root, leaf->start, leaf->len,
+ 0, root->root_key.objectid, 0, 0);
return ret;
}
/*
@@ -3845,7 +3793,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
if (leaf == root->node) {
btrfs_set_header_level(leaf, 0);
} else {
- ret = btrfs_del_leaf(trans, root, path, leaf->start);
+ ret = btrfs_del_leaf(trans, root, path, leaf);
BUG_ON(ret);
}
} else {
@@ -3884,8 +3832,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
if (btrfs_header_nritems(leaf) == 0) {
path->slots[1] = slot;
- ret = btrfs_del_leaf(trans, root, path,
- leaf->start);
+ ret = btrfs_del_leaf(trans, root, path, leaf);
BUG_ON(ret);
free_extent_buffer(leaf);
} else {