author		Jeff Mahoney <jeffm@suse.com>	2016-06-22 18:54:24 -0400
committer	David Sterba <dsterba@suse.com>	2016-12-06 16:06:59 +0100
commit		2ff7e61e0d30ff166a2ae94575526bffe11fd1a8 (patch)
tree		a2b8aba1d10c010cc247f3e0866dcbd627e852f3 /fs/btrfs/delayed-inode.c
parent		afdb571890615059ed4f0625209b379aff6cb08d (diff)
download	linux-2ff7e61e0d30ff166a2ae94575526bffe11fd1a8.tar.bz2
btrfs: take an fs_info directly when the root is not used otherwise
There are loads of functions in btrfs that accept a root parameter
but only use it to obtain an fs_info pointer. Let's convert those to
just accept an fs_info pointer directly.
Signed-off-by: Jeff Mahoney <jeffm@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
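
To illustrate the conversion pattern the commit message describes, here is a minimal, self-contained sketch. The stub types and the release_metadata_old()/release_metadata_new() helpers are simplified stand-ins, not the kernel's real definitions; only the signature change mirrors what the patch does to functions such as btrfs_delayed_item_release_metadata().

/*
 * Minimal sketch of the root -> fs_info parameter conversion.
 * Stub types only; not the kernel's real definitions.
 */
#include <stdio.h>

struct btrfs_fs_info { int delayed_block_rsv; };
struct btrfs_root { struct btrfs_fs_info *fs_info; };
struct btrfs_delayed_item { unsigned long bytes_reserved; };

/* Before: the root is only dereferenced to reach fs_info. */
static void release_metadata_old(struct btrfs_root *root,
				 struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!item->bytes_reserved)
		return;
	printf("release %lu bytes via fs_info %p\n",
	       item->bytes_reserved, (void *)fs_info);
}

/* After: take the fs_info directly and drop the indirection. */
static void release_metadata_new(struct btrfs_fs_info *fs_info,
				 struct btrfs_delayed_item *item)
{
	if (!item->bytes_reserved)
		return;
	printf("release %lu bytes via fs_info %p\n",
	       item->bytes_reserved, (void *)fs_info);
}

int main(void)
{
	struct btrfs_fs_info fs = { 0 };
	struct btrfs_root root = { .fs_info = &fs };
	struct btrfs_delayed_item item = { .bytes_reserved = 4096 };

	release_metadata_old(&root, &item);	/* old call site passes root */
	release_metadata_new(&fs, &item);	/* new call site passes fs_info */
	return 0;
}

Passing fs_info directly spares every call site a root->fs_info dereference and makes it explicit that these helpers act on filesystem-wide state rather than on any particular root.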
Diffstat (limited to 'fs/btrfs/delayed-inode.c')
-rw-r--r--	fs/btrfs/delayed-inode.c	84
1 file changed, 42 insertions, 42 deletions
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index c8ffceb2aff9..33ed79b8d6cc 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -529,10 +529,9 @@ static struct btrfs_delayed_item *__btrfs_next_delayed_item(
 }
 
 static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
-					       struct btrfs_root *root,
+					       struct btrfs_fs_info *fs_info,
 					       struct btrfs_delayed_item *item)
 {
-	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_rsv *src_rsv;
 	struct btrfs_block_rsv *dst_rsv;
 	u64 num_bytes;
@@ -556,10 +555,9 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
-static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
+static void btrfs_delayed_item_release_metadata(struct btrfs_fs_info *fs_info,
 						struct btrfs_delayed_item *item)
 {
-	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_rsv *rsv;
 
 	if (!item->bytes_reserved)
@@ -569,7 +567,7 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
 	trace_btrfs_space_reservation(fs_info, "delayed_item",
 				      item->key.objectid,
 				      item->bytes_reserved, 0);
-	btrfs_block_rsv_release(root, rsv,
+	btrfs_block_rsv_release(fs_info, rsv,
 				item->bytes_reserved);
 }
 
@@ -669,16 +667,15 @@ static int btrfs_delayed_inode_reserve_metadata(
 	if (release) {
 		trace_btrfs_space_reservation(fs_info, "delalloc",
 					      btrfs_ino(inode), num_bytes, 0);
-		btrfs_block_rsv_release(root, src_rsv, num_bytes);
+		btrfs_block_rsv_release(fs_info, src_rsv, num_bytes);
 	}
 
 	return ret;
 }
 
-static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
+static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
 						 struct btrfs_delayed_node *node)
 {
-	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_rsv *rsv;
 
 	if (!node->bytes_reserved)
@@ -687,7 +684,7 @@ static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
 	rsv = &fs_info->delayed_block_rsv;
 	trace_btrfs_space_reservation(fs_info, "delayed_inode",
 				      node->inode_id, node->bytes_reserved, 0);
-	btrfs_block_rsv_release(root, rsv,
+	btrfs_block_rsv_release(fs_info, rsv,
 				node->bytes_reserved);
 	node->bytes_reserved = 0;
 }
@@ -700,6 +697,7 @@ static int btrfs_batch_insert_items(struct btrfs_root *root,
 				    struct btrfs_path *path,
 				    struct btrfs_delayed_item *item)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_delayed_item *curr, *next;
 	int free_space;
 	int total_data_size = 0, total_size = 0;
@@ -716,7 +714,7 @@ static int btrfs_batch_insert_items(struct btrfs_root *root,
 	BUG_ON(!path->nodes[0]);
 
 	leaf = path->nodes[0];
-	free_space = btrfs_leaf_free_space(root, leaf);
+	free_space = btrfs_leaf_free_space(fs_info, leaf);
 	INIT_LIST_HEAD(&head);
 
 	next = item;
@@ -789,7 +787,7 @@ static int btrfs_batch_insert_items(struct btrfs_root *root,
 				    curr->data_len);
 		slot++;
 
-		btrfs_delayed_item_release_metadata(root, curr);
+		btrfs_delayed_item_release_metadata(fs_info, curr);
 
 		list_del(&curr->tree_list);
 		btrfs_release_delayed_item(curr);
@@ -811,6 +809,7 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
 				     struct btrfs_path *path,
 				     struct btrfs_delayed_item *delayed_item)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *leaf;
 	char *ptr;
 	int ret;
@@ -828,7 +827,7 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
 			    delayed_item->data_len);
 	btrfs_mark_buffer_dirty(leaf);
 
-	btrfs_delayed_item_release_metadata(root, delayed_item);
+	btrfs_delayed_item_release_metadata(fs_info, delayed_item);
 	return 0;
 }
 
@@ -880,6 +879,7 @@ static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
 				    struct btrfs_path *path,
 				    struct btrfs_delayed_item *item)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_delayed_item *curr, *next;
 	struct extent_buffer *leaf;
 	struct btrfs_key key;
@@ -929,7 +929,7 @@ static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
 		goto out;
 
 	list_for_each_entry_safe(curr, next, &head, tree_list) {
-		btrfs_delayed_item_release_metadata(root, curr);
+		btrfs_delayed_item_release_metadata(fs_info, curr);
 		list_del(&curr->tree_list);
 		btrfs_release_delayed_item(curr);
 	}
@@ -1015,6 +1015,7 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
 					struct btrfs_path *path,
 					struct btrfs_delayed_node *node)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_key key;
 	struct btrfs_inode_item *inode_item;
 	struct extent_buffer *leaf;
@@ -1071,7 +1072,7 @@ out:
 no_iref:
 	btrfs_release_path(path);
 err_out:
-	btrfs_delayed_inode_release_metadata(root, node);
+	btrfs_delayed_inode_release_metadata(fs_info, node);
 	btrfs_release_delayed_inode(node);
 
 	return ret;
@@ -1136,9 +1137,8 @@ __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
  * outstanding delayed items cleaned up.
  */
 static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
-				     struct btrfs_root *root, int nr)
+				     struct btrfs_fs_info *fs_info, int nr)
 {
-	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_delayed_root *delayed_root;
 	struct btrfs_delayed_node *curr_node, *prev_node;
 	struct btrfs_path *path;
@@ -1184,15 +1184,15 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
 }
 
 int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
-			    struct btrfs_root *root)
+			    struct btrfs_fs_info *fs_info)
 {
-	return __btrfs_run_delayed_items(trans, root, -1);
+	return __btrfs_run_delayed_items(trans, fs_info, -1);
 }
 
 int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
-			       struct btrfs_root *root, int nr)
+			       struct btrfs_fs_info *fs_info, int nr)
 {
-	return __btrfs_run_delayed_items(trans, root, nr);
+	return __btrfs_run_delayed_items(trans, fs_info, nr);
 }
 
 int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
@@ -1235,6 +1235,7 @@ int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
 
 int btrfs_commit_inode_delayed_inode(struct inode *inode)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_trans_handle *trans;
 	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
 	struct btrfs_path *path;
@@ -1266,7 +1267,7 @@ int btrfs_commit_inode_delayed_inode(struct inode *inode)
 	path->leave_spinning = 1;
 
 	block_rsv = trans->block_rsv;
-	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
+	trans->block_rsv = &fs_info->delayed_block_rsv;
 
 	mutex_lock(&delayed_node->mutex);
 	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
@@ -1280,7 +1281,7 @@ int btrfs_commit_inode_delayed_inode(struct inode *inode)
 	trans->block_rsv = block_rsv;
 trans_out:
 	btrfs_end_transaction(trans, delayed_node->root);
-	btrfs_btree_balance_dirty(delayed_node->root);
+	btrfs_btree_balance_dirty(fs_info);
 out:
 	btrfs_release_delayed_node(delayed_node);
 
@@ -1345,7 +1346,7 @@ again:
 
 	trans->block_rsv = block_rsv;
 	btrfs_end_transaction(trans, root);
-	btrfs_btree_balance_dirty_nodelay(root);
+	btrfs_btree_balance_dirty_nodelay(root->fs_info);
 
 release_path:
 	btrfs_release_path(path);
@@ -1402,12 +1403,9 @@ static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
 	return 0;
 }
 
-void btrfs_balance_delayed_items(struct btrfs_root *root)
+void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
 {
-	struct btrfs_delayed_root *delayed_root;
-	struct btrfs_fs_info *fs_info = root->fs_info;
-
-	delayed_root = fs_info->delayed_root;
+	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
 
 	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
 		return;
@@ -1432,8 +1430,9 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
 
 /* Will return 0 or -ENOMEM */
 int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
-				   struct btrfs_root *root, const char *name,
-				   int name_len, struct inode *dir,
+				   struct btrfs_fs_info *fs_info,
+				   const char *name, int name_len,
+				   struct inode *dir,
 				   struct btrfs_disk_key *disk_key, u8 type,
 				   u64 index)
 {
@@ -1464,7 +1463,7 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
 	btrfs_set_stack_dir_type(dir_item, type);
 	memcpy((char *)(dir_item + 1), name, name_len);
 
-	ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
+	ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, delayed_item);
 	/*
 	 * we have reserved enough space when we start a new transaction,
 	 * so reserving metadata failure is impossible
@@ -1475,7 +1474,7 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
 	mutex_lock(&delayed_node->mutex);
 	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
 	if (unlikely(ret)) {
-		btrfs_err(root->fs_info,
+		btrfs_err(fs_info,
			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->objectid,
			  delayed_node->inode_id, ret);
@@ -1488,7 +1487,7 @@ release_node:
 	return ret;
 }
 
-static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
+static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
 {
@@ -1501,15 +1500,15 @@ static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
 		return 1;
 	}
 
-	btrfs_delayed_item_release_metadata(root, item);
+	btrfs_delayed_item_release_metadata(fs_info, item);
 	btrfs_release_delayed_item(item);
 	mutex_unlock(&node->mutex);
 	return 0;
 }
 
 int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
-				   struct btrfs_root *root, struct inode *dir,
-				   u64 index)
+				   struct btrfs_fs_info *fs_info,
+				   struct inode *dir, u64 index)
 {
 	struct btrfs_delayed_node *node;
 	struct btrfs_delayed_item *item;
@@ -1524,7 +1523,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
 	item_key.type = BTRFS_DIR_INDEX_KEY;
 	item_key.offset = index;
 
-	ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
+	ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
 	if (!ret)
 		goto end;
 
@@ -1536,7 +1535,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
 
 	item->key = item_key;
 
-	ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
+	ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, item);
 	/*
 	 * we have reserved enough space when we start a new transaction,
 	 * so reserving metadata failure is impossible.
@@ -1546,7 +1545,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
 	mutex_lock(&node->mutex);
 	ret = __btrfs_add_delayed_deletion_item(node, item);
 	if (unlikely(ret)) {
-		btrfs_err(root->fs_info,
+		btrfs_err(fs_info,
			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->objectid, node->inode_id, ret);
 		BUG();
@@ -1902,12 +1901,13 @@ release_node:
 static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
 {
 	struct btrfs_root *root = delayed_node->root;
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_delayed_item *curr_item, *prev_item;
 
 	mutex_lock(&delayed_node->mutex);
 	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
 	while (curr_item) {
-		btrfs_delayed_item_release_metadata(root, curr_item);
+		btrfs_delayed_item_release_metadata(fs_info, curr_item);
 		prev_item = curr_item;
 		curr_item = __btrfs_next_delayed_item(prev_item);
 		btrfs_release_delayed_item(prev_item);
@@ -1915,7 +1915,7 @@ static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
 
 	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
 	while (curr_item) {
-		btrfs_delayed_item_release_metadata(root, curr_item);
+		btrfs_delayed_item_release_metadata(fs_info, curr_item);
 		prev_item = curr_item;
 		curr_item = __btrfs_next_delayed_item(prev_item);
 		btrfs_release_delayed_item(prev_item);
@@ -1925,7 +1925,7 @@ static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
 		btrfs_release_delayed_iref(delayed_node);
 
 	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
-		btrfs_delayed_inode_release_metadata(root, delayed_node);
+		btrfs_delayed_inode_release_metadata(fs_info, delayed_node);
 		btrfs_release_delayed_inode(delayed_node);
 	}
 	mutex_unlock(&delayed_node->mutex);