diff options
| author | Kent Overstreet <kmo@daterainc.com> | 2014-02-12 18:43:32 -0800 | 
|---|---|---|
| committer | Kent Overstreet <kmo@daterainc.com> | 2014-03-18 12:22:35 -0700 | 
| commit | 7159b1ad3dded9da040b5c608acf3d52d50f661e (patch) | |
| tree | 3ec196333d8ae22e359dc7d16fe8d48b8352fbea /include/trace | |
| parent | 3f5e0a34daed197aa55d0c6b466bb4cd03babb4f (diff) | |
| download | linux-7159b1ad3dded9da040b5c608acf3d52d50f661e.tar.bz2 | |
bcache: Better alloc tracepoints
Change the invalidate tracepoint to indicate how much data we're invalidating,
and change the alloc tracepoints to indicate what offset they're for.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Diffstat (limited to 'include/trace')
| -rw-r--r-- | include/trace/events/bcache.h | 48 | 
1 file changed, 34 insertions(+), 14 deletions(-)
| diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h index 7110897c3dfa..8fc2a7134d3c 100644 --- a/include/trace/events/bcache.h +++ b/include/trace/events/bcache.h @@ -399,26 +399,43 @@ TRACE_EVENT(bcache_keyscan,  /* Allocator */ -TRACE_EVENT(bcache_alloc_invalidate, -	TP_PROTO(struct cache *ca), -	TP_ARGS(ca), +TRACE_EVENT(bcache_invalidate, +	TP_PROTO(struct cache *ca, size_t bucket), +	TP_ARGS(ca, bucket),  	TP_STRUCT__entry( -		__field(unsigned,	free			) -		__field(unsigned,	free_inc		) -		__field(unsigned,	free_inc_size		) -		__field(unsigned,	unused			) +		__field(unsigned,	sectors			) +		__field(dev_t,		dev			) +		__field(__u64,		offset			)  	),  	TP_fast_assign( -		__entry->free		= fifo_used(&ca->free[RESERVE_NONE]); -		__entry->free_inc	= fifo_used(&ca->free_inc); -		__entry->free_inc_size	= ca->free_inc.size; -		__entry->unused		= fifo_used(&ca->unused); +		__entry->dev		= ca->bdev->bd_dev; +		__entry->offset		= bucket << ca->set->bucket_bits; +		__entry->sectors	= GC_SECTORS_USED(&ca->buckets[bucket]);  	), -	TP_printk("free %u free_inc %u/%u unused %u", __entry->free, -		  __entry->free_inc, __entry->free_inc_size, __entry->unused) +	TP_printk("invalidated %u sectors at %d,%d sector=%llu", +		  __entry->sectors, MAJOR(__entry->dev), +		  MINOR(__entry->dev), __entry->offset) +); + +TRACE_EVENT(bcache_alloc, +	TP_PROTO(struct cache *ca, size_t bucket), +	TP_ARGS(ca, bucket), + +	TP_STRUCT__entry( +		__field(dev_t,		dev			) +		__field(__u64,		offset			) +	), + +	TP_fast_assign( +		__entry->dev		= ca->bdev->bd_dev; +		__entry->offset		= bucket << ca->set->bucket_bits; +	), + +	TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev), +		  MINOR(__entry->dev), __entry->offset)  );  TRACE_EVENT(bcache_alloc_fail, @@ -426,6 +443,7 @@ TRACE_EVENT(bcache_alloc_fail,  	TP_ARGS(ca, reserve),  	TP_STRUCT__entry( +		__field(dev_t,		dev			)  		__field(unsigned,	free			)  		__field(unsigned,	free_inc		)  		__field(unsigned,	unused			) @@ 
-433,13 +451,15 @@ TRACE_EVENT(bcache_alloc_fail,  	),  	TP_fast_assign( +		__entry->dev		= ca->bdev->bd_dev;  		__entry->free		= fifo_used(&ca->free[reserve]);  		__entry->free_inc	= fifo_used(&ca->free_inc);  		__entry->unused		= fifo_used(&ca->unused);  		__entry->blocked	= atomic_read(&ca->set->prio_blocked);  	), -	TP_printk("free %u free_inc %u unused %u blocked %u", __entry->free, +	TP_printk("alloc fail %d,%d free %u free_inc %u unused %u blocked %u", +		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free,  		  __entry->free_inc, __entry->unused, __entry->blocked)  ); |