| author | Christoph Hellwig <hch@lst.de> | 2018-11-15 12:17:28 -0700 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2018-11-15 12:17:28 -0700 |
| commit | 0d945c1f966b2bcb67bb12be749da0a7fb00201b | |
| tree | 05a4bf8f0d43cf95878316a0d1e93f018c0bbec9 /block/blk-ioc.c | |
| parent | 6d46964230d182c4b6097379738849a809d791dc | |
| download | linux-0d945c1f966b2bcb67bb12be749da0a7fb00201b.tar.bz2 | |
block: remove the queue_lock indirection
With the legacy request path gone, there is no good reason to keep
queue_lock as a pointer; we can always use the embedded lock now (a short
sketch of the before/after pattern follows the tags below).
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Fixed missing floppy and blk-cgroup conversions and half-done edits.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
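To make the before/after calling convention concrete, here is a minimal userspace sketch (my own illustration, not kernel code): it mirrors the kernel field name `queue_lock` but substitutes a pthread mutex for `spinlock_t`, and `icq_count`/`ioc_clear_queue()` are simplified stand-ins. Before this patch `struct request_queue` carried a `spinlock_t *queue_lock` pointer and callers wrote `spin_lock_irq(q->queue_lock)`; with the lock embedded, every caller takes its address, `spin_lock_irq(&q->queue_lock)`, which is exactly the textual change in the hunks below.

```c
/* Illustrative userspace analogue only -- not kernel code.  The field name
 * queue_lock mirrors struct request_queue, but the locking API here is
 * pthreads and the rest of the struct is invented for the example. */
#include <pthread.h>
#include <stdio.h>

struct request_queue {
	pthread_mutex_t queue_lock;	/* embedded lock: no pointer indirection */
	int icq_count;			/* stand-in for per-queue icq state */
};

static void ioc_clear_queue(struct request_queue *q)
{
	/* take the address of the embedded member -- the &q->queue_lock
	 * pattern this patch converts every call site to */
	pthread_mutex_lock(&q->queue_lock);
	q->icq_count = 0;
	pthread_mutex_unlock(&q->queue_lock);
}

int main(void)
{
	struct request_queue q = { .icq_count = 3 };

	pthread_mutex_init(&q.queue_lock, NULL);
	ioc_clear_queue(&q);
	printf("icq_count after clear: %d\n", q.icq_count);
	pthread_mutex_destroy(&q.queue_lock);
	return 0;
}
```

Build with `cc -pthread sketch.c` if you want to run it.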
Diffstat (limited to 'block/blk-ioc.c')
-rw-r--r-- | block/blk-ioc.c | 14 |
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index f91ca6b70d6a..5ed59ac6ae58 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -110,9 +110,9 @@ static void ioc_release_fn(struct work_struct *work)
 						struct io_cq, ioc_node);
 		struct request_queue *q = icq->q;
 
-		if (spin_trylock(q->queue_lock)) {
+		if (spin_trylock(&q->queue_lock)) {
 			ioc_destroy_icq(icq);
-			spin_unlock(q->queue_lock);
+			spin_unlock(&q->queue_lock);
 		} else {
 			spin_unlock_irqrestore(&ioc->lock, flags);
 			cpu_relax();
@@ -233,9 +233,9 @@ void ioc_clear_queue(struct request_queue *q)
 {
 	LIST_HEAD(icq_list);
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	list_splice_init(&q->icq_list, &icq_list);
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	__ioc_clear_queue(&icq_list);
 }
@@ -326,7 +326,7 @@ struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
 {
 	struct io_cq *icq;
 
-	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(&q->queue_lock);
 
 	/*
 	 * icq's are indexed from @ioc using radix tree and hint pointer,
@@ -385,7 +385,7 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
 	INIT_HLIST_NODE(&icq->ioc_node);
 
 	/* lock both q and ioc and try to link @icq */
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	spin_lock(&ioc->lock);
 
 	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
@@ -401,7 +401,7 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
 	}
 
 	spin_unlock(&ioc->lock);
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 	radix_tree_preload_end();
 	return icq;
 }
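One hunk worth an aside is the one in ioc_release_fn(): the release path already holds ioc->lock, while the nesting order visible in ioc_create_icq() is queue_lock first, then ioc->lock, so the release path may only *try* to take the queue lock and must back off (drop ioc->lock, cpu_relax(), retake it) when the trylock fails. Below is a hedged userspace sketch of that back-off pattern, with pthread mutexes standing in for spinlocks and invented, simplified names; it illustrates the technique, not the kernel implementation.

```c
/* Userspace illustration of the trylock back-off used in ioc_release_fn() --
 * pthread mutexes stand in for spinlocks; names and logic are simplified. */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

struct request_queue { pthread_mutex_t queue_lock; };
struct io_context    { pthread_mutex_t lock; int nr_icqs; };

static void ioc_release(struct io_context *ioc, struct request_queue *q)
{
	pthread_mutex_lock(&ioc->lock);
	while (ioc->nr_icqs > 0) {
		/* nesting order is queue_lock -> ioc->lock, and ioc->lock is
		 * already held, so only *try* the queue lock */
		if (pthread_mutex_trylock(&q->queue_lock) == 0) {
			ioc->nr_icqs--;			/* "destroy" one icq */
			pthread_mutex_unlock(&q->queue_lock);
		} else {
			/* back off: drop ioc->lock, let the other side make
			 * progress (the kernel uses cpu_relax()), then retry */
			pthread_mutex_unlock(&ioc->lock);
			sched_yield();
			pthread_mutex_lock(&ioc->lock);
		}
	}
	pthread_mutex_unlock(&ioc->lock);
}

int main(void)
{
	struct request_queue q;
	struct io_context ioc = { .nr_icqs = 2 };

	pthread_mutex_init(&q.queue_lock, NULL);
	pthread_mutex_init(&ioc.lock, NULL);
	ioc_release(&ioc, &q);
	printf("remaining icqs: %d\n", ioc.nr_icqs);
	return 0;
}
```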