author      Linus Torvalds <torvalds@linux-foundation.org>   2019-11-25 12:01:49 -0800
committer   Linus Torvalds <torvalds@linux-foundation.org>   2019-11-25 12:01:49 -0800
commit      97d0bf96a0d0986f466c3ff59f2ace801e33dc69 (patch)
tree        34dc9957a14562f70a174c8ae5af322cea5befa9 /include/trace
parent      1b88176b9c72fb4edd5920969aef94c5cd358337 (diff)
parent      fa17ed069c61286b26382e23b57a62930657b9c1 (diff)
download    linux-97d0bf96a0d0986f466c3ff59f2ace801e33dc69.tar.bz2
Merge tag 'for-5.5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux
Pull btrfs updates from David Sterba:
"User visible changes:
- new block group profiles: RAID1 with 3- and 4- copies
- RAID1 in btrfs has always 2 copies, now add support for 3 and 4
- this is an incompat feature (named RAID1C34)
- recommended use of RAID1C3 is replacement of RAID6 profile on
metadata, this brings a more reliable resiliency against 2
device loss/damage
- support for new checksums
- per-filesystem, set at mkfs time
- fast hash (crc32c successor): xxhash, 64bit digest
- strong hashes (both 256bit): sha256 (slower, FIPS), blake2b
(faster)
- the blake2b module goes via the crypto tree, btrfs.ko has a
soft dependency
- speed up lseek, don't take inode locks unnecessarily, this can
speed up parallel SEEK_CUR/SEEK_SET/SEEK_END by 80%
- send:
- allow clone operations within the same file
- limit maximum number of sent clone references to avoid slow
backref walking
- error message improvements: device scan prints process name and PID
Core changes:
- cleanups
- remove unique workqueue helpers, used to provide a way to avoid
deadlocks in the workqueue code, now done in a simpler way
- remove lots of indirect function calls in compression code
- extent IO tree code moved out of extent_io.c
- cleanup backup superblock handling at mount time
- transaction life cycle documentation and cleanups
- locking code cleanups, annotations and documentation
- add more cold, const, pure function attributes
- removal of unused or redundant struct members or variables
- new tree-checker sanity tests
- try to detect missing INODE_ITEM, cross-reference checks of
DIR_ITEM, DIR_INDEX, INODE_REF, and XATTR_* items
- remove own bio scheduling code (used to avoid checksum submissions
being stuck behind other IO), replaced by cgroup controller-based
code to allow better control and avoid priority inversions in cases
where the custom and cgroup scheduling disagreed
Fixes:
- avoid getting stuck during cyclic writebacks
- fix trimming of ranges crossing block group boundaries
- fix rename exchange on subvolumes, all involved subvolumes need to
be recorded in the transaction"
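
The lseek item above is about lock contention, not a new API: several threads or processes issuing SEEK_END/SEEK_CUR/SEEK_SET on the same btrfs file no longer serialize on the inode lock. A minimal userspace sketch of that access pattern follows; it is not part of the patch set, and the file path, thread count and iteration count are arbitrary illustration choices. Build with something like cc -O2 -pthread and point it at a file on a btrfs mount.

/*
 * Minimal sketch (not from the patches): generate concurrent lseek calls on
 * one shared descriptor, the workload the changelog says got ~80% faster.
 */
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define THREADS 8
#define ITERS   1000000L

static int fd;				/* shared descriptor, the contended case */

static void *seek_loop(void *arg)
{
	(void)arg;
	for (long i = 0; i < ITERS; i++) {
		/* SEEK_END is one of the variants mentioned above */
		if (lseek(fd, 0, SEEK_END) == (off_t)-1) {
			perror("lseek");
			break;
		}
	}
	return NULL;
}

int main(int argc, char **argv)
{
	pthread_t tids[THREADS];

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file-on-btrfs>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	for (int i = 0; i < THREADS; i++)
		pthread_create(&tids[i], NULL, seek_loop, NULL);
	for (int i = 0; i < THREADS; i++)
		pthread_join(tids[i], NULL);
	close(fd);
	return 0;
}

Timing this loop on a pre-5.5 and a 5.5 kernel is one way to observe the claimed speedup; absolute numbers will of course vary with hardware and thread count.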
* tag 'for-5.5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: (137 commits)
btrfs: drop bdev argument from submit_extent_page
btrfs: remove extent_map::bdev
btrfs: drop bio_set_dev where not needed
btrfs: get bdev directly from fs_devices in submit_extent_page
btrfs: record all roots for rename exchange on a subvol
Btrfs: fix block group remaining RO forever after error during device replace
btrfs: scrub: Don't check free space before marking a block group RO
btrfs: change btrfs_fs_devices::rotating to bool
btrfs: change btrfs_fs_devices::seeding to bool
btrfs: rename btrfs_block_group_cache
btrfs: block-group: Reuse the item key from caller of read_one_block_group()
btrfs: block-group: Refactor btrfs_read_block_groups()
btrfs: document extent buffer locking
btrfs: access eb::blocking_writers according to ACCESS_ONCE policies
btrfs: set blocking_writers directly, no increment or decrement
btrfs: merge blocking_writers branches in btrfs_tree_read_lock
btrfs: drop incompat bit for raid1c34 after last block group is gone
btrfs: add incompat for raid1 with 3, 4 copies
btrfs: add support for 4-copy replication (raid1c4)
btrfs: add support for 3-copy replication (raid1c3)
...
Diffstat (limited to 'include/trace')
-rw-r--r--   include/trace/events/btrfs.h   131

1 file changed, 64 insertions(+), 67 deletions(-)
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 75ae1899452b..620bf1b38fba 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -19,7 +19,7 @@ struct btrfs_delayed_ref_node;
 struct btrfs_delayed_tree_ref;
 struct btrfs_delayed_data_ref;
 struct btrfs_delayed_ref_head;
-struct btrfs_block_group_cache;
+struct btrfs_block_group;
 struct btrfs_free_cluster;
 struct map_lookup;
 struct extent_buffer;
@@ -170,7 +170,7 @@ DECLARE_EVENT_CLASS(btrfs__inode,

 	TP_STRUCT__entry_btrfs(
 		__field( u64, ino )
-		__field( blkcnt_t, blocks )
+		__field( u64, blocks )
 		__field( u64, disk_i_size )
 		__field( u64, generation )
 		__field( u64, last_trans )
@@ -194,7 +194,7 @@ DECLARE_EVENT_CLASS(btrfs__inode,
 		  show_root_type(__entry->root_objectid),
 		  __entry->generation,
 		  __entry->ino,
-		  (unsigned long long)__entry->blocks,
+		  __entry->blocks,
 		  __entry->disk_i_size,
 		  __entry->last_trans,
 		  __entry->logged_trans)
@@ -292,7 +292,7 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,

 TRACE_EVENT(btrfs_handle_em_exist,

-	TP_PROTO(struct btrfs_fs_info *fs_info,
+	TP_PROTO(const struct btrfs_fs_info *fs_info,
 		const struct extent_map *existing, const struct extent_map *map,
 		u64 start, u64 len),

@@ -330,8 +330,8 @@ TRACE_EVENT(btrfs_handle_em_exist,

 /* file extent item */
 DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular,
-	TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
-		 struct btrfs_file_extent_item *fi, u64 start),
+	TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l,
+		 const struct btrfs_file_extent_item *fi, u64 start),

 	TP_ARGS(bi, l, fi, start),

@@ -385,8 +385,8 @@ DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular,
 DECLARE_EVENT_CLASS(
 	btrfs__file_extent_item_inline,

-	TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
-		 struct btrfs_file_extent_item *fi, int slot, u64 start),
+	TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l,
+		 const struct btrfs_file_extent_item *fi, int slot, u64 start),

 	TP_ARGS(bi, l, fi, slot, start),

@@ -426,8 +426,8 @@ DECLARE_EVENT_CLASS(
 DEFINE_EVENT(
 	btrfs__file_extent_item_regular, btrfs_get_extent_show_fi_regular,

-	TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
-		 struct btrfs_file_extent_item *fi, u64 start),
+	TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l,
+		 const struct btrfs_file_extent_item *fi, u64 start),

 	TP_ARGS(bi, l, fi, start)
 );
@@ -435,8 +435,8 @@ DEFINE_EVENT(
 DEFINE_EVENT(
 	btrfs__file_extent_item_regular, btrfs_truncate_show_fi_regular,

-	TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
-		 struct btrfs_file_extent_item *fi, u64 start),
+	TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l,
+		 const struct btrfs_file_extent_item *fi, u64 start),

 	TP_ARGS(bi, l, fi, start)
 );
@@ -444,8 +444,8 @@ DEFINE_EVENT(
 DEFINE_EVENT(
 	btrfs__file_extent_item_inline, btrfs_get_extent_show_fi_inline,

-	TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
-		 struct btrfs_file_extent_item *fi, int slot, u64 start),
+	TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l,
+		 const struct btrfs_file_extent_item *fi, int slot, u64 start),

 	TP_ARGS(bi, l, fi, slot, start)
 );
@@ -453,8 +453,8 @@ DEFINE_EVENT(
 DEFINE_EVENT(
 	btrfs__file_extent_item_inline, btrfs_truncate_show_fi_inline,

-	TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
-		 struct btrfs_file_extent_item *fi, int slot, u64 start),
+	TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l,
+		 const struct btrfs_file_extent_item *fi, int slot, u64 start),

 	TP_ARGS(bi, l, fi, slot, start)
 );
@@ -574,7 +574,7 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
 		__field( char, for_kupdate )
 		__field( char, for_reclaim )
 		__field( char, range_cyclic )
-		__field( pgoff_t, writeback_index )
+		__field( unsigned long, writeback_index )
 		__field( u64, root_objectid )
 	),

@@ -603,7 +603,7 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
 		  __entry->range_start, __entry->range_end,
 		  __entry->for_kupdate,
 		  __entry->for_reclaim, __entry->range_cyclic,
-		  (unsigned long)__entry->writeback_index)
+		  __entry->writeback_index)
 );

 DEFINE_EVENT(btrfs__writepage, __extent_writepage,
@@ -622,7 +622,7 @@ TRACE_EVENT(btrfs_writepage_end_io_hook,

 	TP_STRUCT__entry_btrfs(
 		__field( u64, ino )
-		__field( pgoff_t, index )
+		__field( unsigned long, index )
 		__field( u64, start )
 		__field( u64, end )
 		__field( int, uptodate )
@@ -642,7 +642,7 @@ TRACE_EVENT(btrfs_writepage_end_io_hook,
 	TP_printk_btrfs("root=%llu(%s) ino=%llu page_index=%lu start=%llu "
 		  "end=%llu uptodate=%d",
 		  show_root_type(__entry->root_objectid),
-		  __entry->ino, (unsigned long)__entry->index,
+		  __entry->ino, __entry->index,
 		  __entry->start, __entry->end,
 		  __entry->uptodate)
 );
@@ -699,7 +699,7 @@ TRACE_EVENT(btrfs_sync_fs,
 TRACE_EVENT(btrfs_add_block_group,

 	TP_PROTO(const struct btrfs_fs_info *fs_info,
-		 const struct btrfs_block_group_cache *block_group, int create),
+		 const struct btrfs_block_group *block_group, int create),

 	TP_ARGS(fs_info, block_group, create),

@@ -713,11 +713,10 @@ TRACE_EVENT(btrfs_add_block_group,
 	),

 	TP_fast_assign_btrfs(fs_info,
-		__entry->offset = block_group->key.objectid;
-		__entry->size = block_group->key.offset;
+		__entry->offset = block_group->start;
+		__entry->size = block_group->length;
 		__entry->flags = block_group->flags;
-		__entry->bytes_used =
-			btrfs_block_group_used(&block_group->item);
+		__entry->bytes_used = block_group->used;
 		__entry->bytes_super = block_group->bytes_super;
 		__entry->create = create;
 	),
@@ -1018,7 +1017,7 @@ TRACE_EVENT(btrfs_cow_block,

 TRACE_EVENT(btrfs_space_reservation,

-	TP_PROTO(const struct btrfs_fs_info *fs_info, char *type, u64 val,
+	TP_PROTO(const struct btrfs_fs_info *fs_info, const char *type, u64 val,
 		 u64 bytes, int reserve),

 	TP_ARGS(fs_info, type, val, bytes, reserve),
@@ -1051,7 +1050,7 @@ TRACE_EVENT(btrfs_space_reservation,
 TRACE_EVENT(btrfs_trigger_flush,

 	TP_PROTO(const struct btrfs_fs_info *fs_info, u64 flags, u64 bytes,
-		 int flush, char *reason),
+		 int flush, const char *reason),

 	TP_ARGS(fs_info, flags, bytes, flush, reason),

@@ -1185,7 +1184,7 @@ TRACE_EVENT(find_free_extent,

 DECLARE_EVENT_CLASS(btrfs__reserve_extent,

-	TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start,
+	TP_PROTO(const struct btrfs_block_group *block_group, u64 start,
 		 u64 len),

 	TP_ARGS(block_group, start, len),
@@ -1198,7 +1197,7 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
 	),

 	TP_fast_assign_btrfs(block_group->fs_info,
-		__entry->bg_objectid = block_group->key.objectid;
+		__entry->bg_objectid = block_group->start;
 		__entry->flags = block_group->flags;
 		__entry->start = start;
 		__entry->len = len;
@@ -1215,7 +1214,7 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,

 DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent,

-	TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start,
+	TP_PROTO(const struct btrfs_block_group *block_group, u64 start,
 		 u64 len),

 	TP_ARGS(block_group, start, len)
@@ -1223,7 +1222,7 @@ DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent,

 DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster,

-	TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start,
+	TP_PROTO(const struct btrfs_block_group *block_group, u64 start,
 		 u64 len),

 	TP_ARGS(block_group, start, len)
@@ -1231,7 +1230,7 @@ DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster,

 TRACE_EVENT(btrfs_find_cluster,

-	TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start,
+	TP_PROTO(const struct btrfs_block_group *block_group, u64 start,
 		 u64 bytes, u64 empty_size, u64 min_bytes),

 	TP_ARGS(block_group, start, bytes, empty_size, min_bytes),
@@ -1246,7 +1245,7 @@ TRACE_EVENT(btrfs_find_cluster,
 	),

 	TP_fast_assign_btrfs(block_group->fs_info,
-		__entry->bg_objectid = block_group->key.objectid;
+		__entry->bg_objectid = block_group->start;
 		__entry->flags = block_group->flags;
 		__entry->start = start;
 		__entry->bytes = bytes;
@@ -1264,7 +1263,7 @@ TRACE_EVENT(btrfs_find_cluster,

 TRACE_EVENT(btrfs_failed_cluster_setup,

-	TP_PROTO(const struct btrfs_block_group_cache *block_group),
+	TP_PROTO(const struct btrfs_block_group *block_group),

 	TP_ARGS(block_group),

@@ -1273,7 +1272,7 @@ TRACE_EVENT(btrfs_failed_cluster_setup,
 	),

 	TP_fast_assign_btrfs(block_group->fs_info,
-		__entry->bg_objectid = block_group->key.objectid;
+		__entry->bg_objectid = block_group->start;
 	),

 	TP_printk_btrfs("block_group=%llu", __entry->bg_objectid)
@@ -1281,7 +1280,7 @@ TRACE_EVENT(btrfs_failed_cluster_setup,

 TRACE_EVENT(btrfs_setup_cluster,

-	TP_PROTO(const struct btrfs_block_group_cache *block_group,
+	TP_PROTO(const struct btrfs_block_group *block_group,
 		 const struct btrfs_free_cluster *cluster,
 		 u64 size, int bitmap),

@@ -1297,7 +1296,7 @@ TRACE_EVENT(btrfs_setup_cluster,
 	),

 	TP_fast_assign_btrfs(block_group->fs_info,
-		__entry->bg_objectid = block_group->key.objectid;
+		__entry->bg_objectid = block_group->start;
 		__entry->flags = block_group->flags;
 		__entry->start = cluster->window_start;
 		__entry->max_size = cluster->max_size;
@@ -1325,17 +1324,17 @@ TRACE_EVENT(alloc_extent_state,
 	TP_STRUCT__entry(
 		__field(const struct extent_state *, state)
 		__field(gfp_t, mask)
-		__field(unsigned long, ip)
+		__field(const void*, ip)
 	),

 	TP_fast_assign(
 		__entry->state = state,
 		__entry->mask = mask,
-		__entry->ip = IP
+		__entry->ip = (const void *)IP
 	),

 	TP_printk("state=%p mask=%s caller=%pS", __entry->state,
-		  show_gfp_flags(__entry->mask), (const void *)__entry->ip)
+		  show_gfp_flags(__entry->mask), __entry->ip)
 );

 TRACE_EVENT(free_extent_state,
@@ -1346,16 +1345,15 @@ TRACE_EVENT(free_extent_state,

 	TP_STRUCT__entry(
 		__field(const struct extent_state *, state)
-		__field(unsigned long, ip)
+		__field(const void*, ip)
 	),

 	TP_fast_assign(
 		__entry->state = state,
-		__entry->ip = IP
+		__entry->ip = (const void *)IP
 	),

-	TP_printk("state=%p caller=%pS", __entry->state,
-		  (const void *)__entry->ip)
+	TP_printk("state=%p caller=%pS", __entry->state, __entry->ip)
 );

 DECLARE_EVENT_CLASS(btrfs__work,
@@ -1389,9 +1387,9 @@ DECLARE_EVENT_CLASS(btrfs__work,
 );

 /*
- * For situiations when the work is freed, we pass fs_info and a tag that that
- * matches address of the work structure so it can be paired with the
- * scheduling event.
+ * For situations when the work is freed, we pass fs_info and a tag that matches
+ * the address of the work structure so it can be paired with the scheduling
+ * event. DO NOT add anything here that dereferences wtag.
  */
 DECLARE_EVENT_CLASS(btrfs__work__done,

@@ -1567,8 +1565,7 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
 	),

 	TP_printk_btrfs("bytenr=%llu num_bytes=%llu",
-		  (unsigned long long)__entry->bytenr,
-		  (unsigned long long)__entry->num_bytes)
+		  __entry->bytenr, __entry->num_bytes)
 );

 DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents,
@@ -1644,7 +1641,7 @@ TRACE_EVENT(btrfs_qgroup_account_extent,
 TRACE_EVENT(qgroup_update_counters,

 	TP_PROTO(const struct btrfs_fs_info *fs_info,
-		 struct btrfs_qgroup *qgroup,
+		 const struct btrfs_qgroup *qgroup,
 		 u64 cur_old_count, u64 cur_new_count),

 	TP_ARGS(fs_info, qgroup, cur_old_count, cur_new_count),
@@ -1825,7 +1822,7 @@ DEFINE_EVENT(btrfs__prelim_ref, btrfs_prelim_ref_insert,
 );

 TRACE_EVENT(btrfs_inode_mod_outstanding_extents,
-	TP_PROTO(struct btrfs_root *root, u64 ino, int mod),
+	TP_PROTO(const struct btrfs_root *root, u64 ino, int mod),

 	TP_ARGS(root, ino, mod),

@@ -1847,7 +1844,7 @@ TRACE_EVENT(btrfs_inode_mod_outstanding_extents,
 );

 DECLARE_EVENT_CLASS(btrfs__block_group,
-	TP_PROTO(const struct btrfs_block_group_cache *bg_cache),
+	TP_PROTO(const struct btrfs_block_group *bg_cache),

 	TP_ARGS(bg_cache),

@@ -1859,9 +1856,9 @@ DECLARE_EVENT_CLASS(btrfs__block_group,
 	),

 	TP_fast_assign_btrfs(bg_cache->fs_info,
-		__entry->bytenr = bg_cache->key.objectid,
-		__entry->len = bg_cache->key.offset,
-		__entry->used = btrfs_block_group_used(&bg_cache->item);
+		__entry->bytenr = bg_cache->start,
+		__entry->len = bg_cache->length,
+		__entry->used = bg_cache->used;
 		__entry->flags = bg_cache->flags;
 	),

@@ -1871,19 +1868,19 @@ DECLARE_EVENT_CLASS(btrfs__block_group,
 );

 DEFINE_EVENT(btrfs__block_group, btrfs_remove_block_group,
-	TP_PROTO(const struct btrfs_block_group_cache *bg_cache),
+	TP_PROTO(const struct btrfs_block_group *bg_cache),

 	TP_ARGS(bg_cache)
 );

 DEFINE_EVENT(btrfs__block_group, btrfs_add_unused_block_group,
-	TP_PROTO(const struct btrfs_block_group_cache *bg_cache),
+	TP_PROTO(const struct btrfs_block_group *bg_cache),

 	TP_ARGS(bg_cache)
 );

 DEFINE_EVENT(btrfs__block_group, btrfs_skip_unused_block_group,
-	TP_PROTO(const struct btrfs_block_group_cache *bg_cache),
+	TP_PROTO(const struct btrfs_block_group *bg_cache),

 	TP_ARGS(bg_cache)
 );
@@ -1906,7 +1903,7 @@ TRACE_EVENT(btrfs_set_extent_bit,
 	TP_fast_assign_btrfs(tree->fs_info,
 		__entry->owner = tree->owner;
 		if (tree->private_data) {
-			struct inode *inode = tree->private_data;
+			const struct inode *inode = tree->private_data;

 			__entry->ino = btrfs_ino(BTRFS_I(inode));
 			__entry->rootid =
@@ -1945,7 +1942,7 @@ TRACE_EVENT(btrfs_clear_extent_bit,
 	TP_fast_assign_btrfs(tree->fs_info,
 		__entry->owner = tree->owner;
 		if (tree->private_data) {
-			struct inode *inode = tree->private_data;
+			const struct inode *inode = tree->private_data;

 			__entry->ino = btrfs_ino(BTRFS_I(inode));
 			__entry->rootid =
@@ -1985,7 +1982,7 @@ TRACE_EVENT(btrfs_convert_extent_bit,
 	TP_fast_assign_btrfs(tree->fs_info,
 		__entry->owner = tree->owner;
 		if (tree->private_data) {
-			struct inode *inode = tree->private_data;
+			const struct inode *inode = tree->private_data;

 			__entry->ino = btrfs_ino(BTRFS_I(inode));
 			__entry->rootid =
@@ -2094,8 +2091,8 @@ DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_lock_atomic);

 DECLARE_EVENT_CLASS(btrfs__space_info_update,

-	TP_PROTO(struct btrfs_fs_info *fs_info,
-		 struct btrfs_space_info *sinfo, u64 old, s64 diff),
+	TP_PROTO(const struct btrfs_fs_info *fs_info,
+		 const struct btrfs_space_info *sinfo, u64 old, s64 diff),

 	TP_ARGS(fs_info, sinfo, old, diff),

@@ -2117,16 +2114,16 @@ DECLARE_EVENT_CLASS(btrfs__space_info_update,

 DEFINE_EVENT(btrfs__space_info_update, update_bytes_may_use,

-	TP_PROTO(struct btrfs_fs_info *fs_info,
-		 struct btrfs_space_info *sinfo, u64 old, s64 diff),
+	TP_PROTO(const struct btrfs_fs_info *fs_info,
+		 const struct btrfs_space_info *sinfo, u64 old, s64 diff),

 	TP_ARGS(fs_info, sinfo, old, diff)
 );

 DEFINE_EVENT(btrfs__space_info_update, update_bytes_pinned,

-	TP_PROTO(struct btrfs_fs_info *fs_info,
-		 struct btrfs_space_info *sinfo, u64 old, s64 diff),
+	TP_PROTO(const struct btrfs_fs_info *fs_info,
+		 const struct btrfs_space_info *sinfo, u64 old, s64 diff),

 	TP_ARGS(fs_info, sinfo, old, diff)
 );
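
To watch the tracepoints touched by this diff without any extra tooling, tracefs can be read directly. The sketch below enables btrfs_reserve_extent and streams the formatted events; it is not part of the patch set. It assumes tracefs is mounted at /sys/kernel/tracing (older setups use /sys/kernel/debug/tracing), needs root, and the choice of event and buffer size are arbitrary; any other btrfs event in this header works the same way.

/*
 * Minimal sketch of a tracefs consumer for the btrfs events above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define TRACEFS "/sys/kernel/tracing"

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, val, strlen(val)) < 0) {
		perror(path);
		if (fd >= 0)
			close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	char buf[4096];
	ssize_t n;
	int pipe_fd;

	/* enable one of the events whose assignment the diff above changes */
	if (write_str(TRACEFS "/events/btrfs/btrfs_reserve_extent/enable", "1"))
		return 1;

	pipe_fd = open(TRACEFS "/trace_pipe", O_RDONLY);
	if (pipe_fd < 0) {
		perror("trace_pipe");
		return 1;
	}

	/* stream formatted events as they are emitted by the kernel */
	while ((n = read(pipe_fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);

	close(pipe_fd);
	return 0;
}

After this series the block_group value in these events is filled from btrfs_block_group::start instead of the old key.objectid, as the hunks above show; the printed format itself is unchanged.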