field | value | date
---|---|---
author | Qu Wenruo <quwenruo@cn.fujitsu.com> | 2014-03-06 04:19:50 +0000
committer | Josef Bacik <jbacik@fb.com> | 2014-03-10 15:17:21 -0400
commit | 52483bc26f0e95c91e8fd07f9def588bf89664f8 (patch) |
tree | 49c742d358bafac2c5339715524a9419915cdaa0 /fs/btrfs/async-thread.c |
parent | 6db8914f9763d3f0a7610b497d44f93a4c17e62e (diff) |
download | linux-52483bc26f0e95c91e8fd07f9def588bf89664f8.tar.bz2 |
btrfs: Add ftrace for btrfs_workqueue
Add ftrace for btrfs_workqueue for further workqueue tuning.
This patch needs to be applied after the workqueue replace patchset.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
Signed-off-by: Josef Bacik <jbacik@fb.com>
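The tracepoints added here are consumed like any other ftrace event. The sketch below is not part of the patch: it is a minimal user-space program that enables the four new events through tracefs and streams the formatted output. The tracefs mount point (/sys/kernel/debug/tracing) and the per-event paths under events/btrfs/ are assumptions based on the standard ftrace layout; the event names are inferred from the trace_* call sites in the diff below.

```c
/*
 * Sketch, not part of the patch: enable the new btrfs workqueue
 * events and stream them.  Paths assume tracefs is reachable at
 * /sys/kernel/debug/tracing; event names are inferred from the
 * trace_* call sites in the diff.
 */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	static const char *base = "/sys/kernel/debug/tracing";
	static const char *events[] = {
		"btrfs_work_queued", "btrfs_work_sched",
		"btrfs_ordered_sched", "btrfs_all_work_done",
	};
	char path[256], line[512];
	FILE *trace;
	unsigned int i;

	/* Flip the per-event enable switch for each new event. */
	for (i = 0; i < sizeof(events) / sizeof(events[0]); i++) {
		snprintf(path, sizeof(path), "%s/events/btrfs/%s/enable",
			 base, events[i]);
		if (write_str(path, "1"))
			fprintf(stderr, "could not enable %s\n", events[i]);
	}

	/* Stream formatted events as btrfs workqueues queue and run work. */
	snprintf(path, sizeof(path), "%s/trace_pipe", base);
	trace = fopen(path, "r");
	if (!trace)
		return 1;
	while (fgets(line, sizeof(line), trace))
		fputs(line, stdout);
	fclose(trace);
	return 0;
}
```

The same effect can be had from a shell by writing 1 to the same enable files and reading trace_pipe; the program only makes the paths explicit.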
Diffstat (limited to 'fs/btrfs/async-thread.c')
-rw-r--r-- | fs/btrfs/async-thread.c | 7 |
1 file changed, 7 insertions, 0 deletions
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index d8c07e5c1f24..00623dd16b81 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -24,6 +24,7 @@
 #include <linux/freezer.h>
 #include <linux/workqueue.h>
 #include "async-thread.h"
+#include "ctree.h"
 
 #define WORK_DONE_BIT 0
 #define WORK_ORDER_DONE_BIT 1
@@ -210,6 +211,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
 		 */
 		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
 			break;
+		trace_btrfs_ordered_sched(work);
 		spin_unlock_irqrestore(lock, flags);
 		work->ordered_func(work);
 
@@ -223,6 +225,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
 		 * with the lock held though
 		 */
 		work->ordered_free(work);
+		trace_btrfs_all_work_done(work);
 	}
 	spin_unlock_irqrestore(lock, flags);
 }
@@ -246,12 +249,15 @@ static void normal_work_helper(struct work_struct *arg)
 		need_order = 1;
 	wq = work->wq;
 
+	trace_btrfs_work_sched(work);
 	thresh_exec_hook(wq);
 	work->func(work);
 	if (need_order) {
 		set_bit(WORK_DONE_BIT, &work->flags);
 		run_ordered_work(wq);
 	}
+	if (!need_order)
+		trace_btrfs_all_work_done(work);
 }
 
 void btrfs_init_work(struct btrfs_work *work,
@@ -280,6 +286,7 @@ static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
 		spin_unlock_irqrestore(&wq->list_lock, flags);
 	}
 	queue_work(wq->normal_wq, &work->normal_work);
+	trace_btrfs_work_queued(work);
 }
 
 void btrfs_queue_work(struct btrfs_workqueue *wq,
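The trace_btrfs_* calls added above only fire tracepoints; the event declarations themselves are not part of this diff. As a reference, here is a minimal sketch, assuming a single shared event class keyed on struct btrfs_work, of how such events are typically declared with DECLARE_EVENT_CLASS/DEFINE_EVENT. The class name, field layout, and format string are assumptions, not the upstream definitions, and the usual TRACE_INCLUDE_*/define_trace.h boilerplate of a real trace header is omitted.

```c
/*
 * Sketch only: the real btrfs workqueue event definitions are
 * provided by a separate patch; field layout and format string
 * below are assumptions.  struct btrfs_work (fs/btrfs/async-thread.h)
 * must be fully visible wherever CREATE_TRACE_POINTS expands this.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM btrfs

#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(btrfs__work,

	TP_PROTO(struct btrfs_work *work),

	TP_ARGS(work),

	TP_STRUCT__entry(
		__field(void *,	work)
		__field(void *,	wq)
		__field(void *,	func)
		__field(void *,	ordered_func)
		__field(void *,	ordered_free)
	),

	TP_fast_assign(
		__entry->work		= work;
		__entry->wq		= work->wq;
		__entry->func		= work->func;
		__entry->ordered_func	= work->ordered_func;
		__entry->ordered_free	= work->ordered_free;
	),

	TP_printk("work=%p wq=%p func=%p ordered_func=%p ordered_free=%p",
		  __entry->work, __entry->wq, __entry->func,
		  __entry->ordered_func, __entry->ordered_free)
);

/* One event per call site added in async-thread.c above. */
DEFINE_EVENT(btrfs__work, btrfs_work_queued,
	TP_PROTO(struct btrfs_work *work),
	TP_ARGS(work)
);

DEFINE_EVENT(btrfs__work, btrfs_work_sched,
	TP_PROTO(struct btrfs_work *work),
	TP_ARGS(work)
);

DEFINE_EVENT(btrfs__work, btrfs_ordered_sched,
	TP_PROTO(struct btrfs_work *work),
	TP_ARGS(work)
);

DEFINE_EVENT(btrfs__work, btrfs_all_work_done,
	TP_PROTO(struct btrfs_work *work),
	TP_ARGS(work)
);
```

Keeping the queued/sched/done events in one class keeps the per-event cost identical and makes it easy to correlate a work item across its lifetime by the work pointer.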