author		Tejun Heo <tj@kernel.org>	2018-01-09 08:29:53 -0800
committer	Jens Axboe <axboe@kernel.dk>	2018-01-09 09:31:15 -0700
commit		05707b64aed8f5f1674b25334fb720d651459d5e (patch)
tree		121598221971a290944d767f97ef83486b79fc88 /block/blk-mq.c
parent		5a61c36398d0626bad377a7f5b9391b21e16e91d (diff)
download	linux-05707b64aed8f5f1674b25334fb720d651459d5e.tar.bz2
blk-mq: rename blk_mq_hw_ctx->queue_rq_srcu to ->srcu
The RCU protection has been expanded to cover both queueing and
completion paths, making ->queue_rq_srcu a misnomer. Rename it to
->srcu as suggested by Bart.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Bart Van Assche <Bart.VanAssche@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
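For context on the hunks below: the field being renamed is the per-hardware-context SRCU instance used when a driver's ->queue_rq() may sleep (BLK_MQ_F_BLOCKING). The hctx_lock()/hctx_unlock() helpers pick plain RCU or SRCU per hctx, and the quiesce and timeout paths synchronize against that same instance, which is why a queue_rq-specific name no longer fits. A simplified sketch of a dispatch path using these helpers, close to the shape of blk_mq_try_issue_directly() in this tree (not the full function):

/* Every call into the driver's ->queue_rq() runs between hctx_lock()
 * and hctx_unlock().  On a BLK_MQ_F_BLOCKING hctx that is an SRCU
 * read-side section on hctx->srcu, so synchronize_srcu(hctx->srcu)
 * in the quiesce/timeout paths waits out any in-flight dispatch. */
static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
		struct request *rq, blk_qc_t *cookie)
{
	int srcu_idx;

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	hctx_lock(hctx, &srcu_idx);	/* rcu_read_lock() or srcu_read_lock(hctx->srcu) */
	__blk_mq_try_issue_directly(hctx, rq, cookie, false);
	hctx_unlock(hctx, srcu_idx);	/* matching unlock for whichever flavor */
}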
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 275812909d77..0269d44d512e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -219,7 +219,7 @@ void blk_mq_quiesce_queue(struct request_queue *q)
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (hctx->flags & BLK_MQ_F_BLOCKING)
-			synchronize_srcu(hctx->queue_rq_srcu);
+			synchronize_srcu(hctx->srcu);
 		else
 			rcu = true;
 	}
@@ -564,7 +564,7 @@ static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
 	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
 		rcu_read_unlock();
 	else
-		srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
+		srcu_read_unlock(hctx->srcu, srcu_idx);
 }
 
 static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
@@ -572,7 +572,7 @@ static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
 	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
 		rcu_read_lock();
 	else
-		*srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
+		*srcu_idx = srcu_read_lock(hctx->srcu);
 }
 
 static void blk_mq_rq_update_aborted_gstate(struct request *rq, u64 gstate)
@@ -937,7 +937,7 @@ static void blk_mq_timeout_work(struct work_struct *work)
 		if (!(hctx->flags & BLK_MQ_F_BLOCKING))
 			has_rcu = true;
 		else
-			synchronize_srcu(hctx->queue_rq_srcu);
+			synchronize_srcu(hctx->srcu);
 
 		hctx->nr_expired = 0;
 	}
@@ -2101,7 +2101,7 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 		set->ops->exit_hctx(hctx, hctx_idx);
 
 	if (hctx->flags & BLK_MQ_F_BLOCKING)
-		cleanup_srcu_struct(hctx->queue_rq_srcu);
+		cleanup_srcu_struct(hctx->srcu);
 
 	blk_mq_remove_cpuhp(hctx);
 	blk_free_flush_queue(hctx->fq);
@@ -2174,7 +2174,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
 		goto free_fq;
 
 	if (hctx->flags & BLK_MQ_F_BLOCKING)
-		init_srcu_struct(hctx->queue_rq_srcu);
+		init_srcu_struct(hctx->srcu);
 
 	blk_mq_debugfs_register_hctx(q, hctx);
@@ -2463,7 +2463,7 @@ static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
 {
 	int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
 
-	BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, queue_rq_srcu),
+	BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
 		     __alignof__(struct blk_mq_hw_ctx)) !=
 		     sizeof(struct blk_mq_hw_ctx));
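The last hunk is the subtle one: ->srcu is not a plain member but a zero-length tail array, so srcu_struct storage is appended only for hardware contexts that actually need it. The BUILD_BUG_ON asserts that the (renamed) field really sits flush at the end of struct blk_mq_hw_ctx; otherwise the conditional tail allocation in blk_mq_hw_ctx_size() would be wrong. A sketch of the idea (the struct body is abbreviated and paraphrased, not quoted from the tree):

struct blk_mq_hw_ctx {
	/* ... all fixed-size members ... */
	unsigned long		flags;		/* BLK_MQ_F_BLOCKING et al. */

	/* Must be last: storage is appended only for blocking hctxs. */
	struct srcu_struct	srcu[0];
};

static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
{
	int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);

	/* If ->srcu were followed by other members or tail padding,
	 * offsetof() would not land exactly at sizeof() and the
	 * appended srcu_struct would overlap or leave a hole. */
	BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
			   __alignof__(struct blk_mq_hw_ctx)) !=
		     sizeof(struct blk_mq_hw_ctx));

	if (tag_set->flags & BLK_MQ_F_BLOCKING)
		hw_ctx_size += sizeof(struct srcu_struct);

	return hw_ctx_size;
}

Non-blocking hardware contexts thus pay nothing for SRCU, while blocking ones get exactly one srcu_struct appended to their allocation, addressed through the same ->srcu name the rest of this patch switches to.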