| author | Bart Van Assche <bart.vanassche@wdc.com> | 2017-08-17 13:13:22 -0700 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2017-08-18 08:45:29 -0600 |
| commit | 8fe700650ef69a561a1745764aa42252cfee9c19 (patch) | |
| tree | 9f45f0195c24c3aab645d2aad1f222091cc160ee | |
| parent | 3d17a679d3514c6727dcf2a9d9f45c709da5352e (diff) | |
skd: Convert explicit skd_request_fn() calls
This will make it easier to convert this driver to the blk-mq
approach. This patch also reduces interrupt latency by moving the
skd_request_fn() calls out of the skd_isr() interrupt handler.
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
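
For illustration, here is the pattern the patch applies, as a minimal sketch with hypothetical names (my_dev, my_isr, my_request_fn) rather than the driver's actual code: calling the legacy request_fn directly runs the whole submission path in hard-IRQ context under the queue lock, whereas blk_run_queue_async() only schedules the queue to be run later, so the interrupt handler returns sooner.

```c
/*
 * Minimal sketch of the pattern this patch applies; my_dev, my_isr and
 * my_request_fn are hypothetical names, not taken from skd_main.c.
 */
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct my_dev {
	spinlock_t lock;
	struct request_queue *queue;	/* legacy (single-queue) request queue */
};

static irqreturn_t my_isr(int irq, void *data)
{
	struct my_dev *dev = data;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	/* Before: the request_fn ran right here, in hard-IRQ context. */
	/* my_request_fn(dev->queue); */

	/* After: only ask the block layer to run the queue asynchronously. */
	blk_run_queue_async(dev->queue);

	spin_unlock_irqrestore(&dev->lock, flags);
	return IRQ_HANDLED;
}
```

The hunks below make this substitution in skd_completion_worker(), skd_isr() and skd_comp_q().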
-rw-r--r-- | drivers/block/skd_main.c | 10 |
1 file changed, 5 insertions(+), 5 deletions(-)
```diff
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 8040500ba09c..3db89707b227 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -2806,7 +2806,7 @@ static void skd_completion_worker(struct work_struct *work)
 	 * process everything in compq
 	 */
 	skd_isr_completion_posted(skdev, 0, &flush_enqueued);
-	skd_request_fn(skdev->queue);
+	blk_run_queue_async(skdev->queue);
 
 	spin_unlock_irqrestore(&skdev->lock, flags);
 }
@@ -2882,12 +2882,12 @@ skd_isr(int irq, void *ptr)
 	}
 
 	if (unlikely(flush_enqueued))
-		skd_request_fn(skdev->queue);
+		blk_run_queue_async(skdev->queue);
 
 	if (deferred)
 		schedule_work(&skdev->completion_worker);
 	else if (!flush_enqueued)
-		skd_request_fn(skdev->queue);
+		blk_run_queue_async(skdev->queue);
 
 	spin_unlock(&skdev->lock);
@@ -3588,12 +3588,12 @@ static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
 	deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
 					     &flush_enqueued);
 	if (flush_enqueued)
-		skd_request_fn(skdev->queue);
+		blk_run_queue_async(skdev->queue);
 
 	if (deferred)
 		schedule_work(&skdev->completion_worker);
 	else if (!flush_enqueued)
-		skd_request_fn(skdev->queue);
+		blk_run_queue_async(skdev->queue);
 
 	spin_unlock_irqrestore(&skdev->lock, flags);
```
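
A note on why the substitution lowers interrupt latency: in the legacy single-queue block layer, blk_run_queue_async() does not run the queue in place; it arms the queue's kblockd delayed work, so the request function executes later in process context. The sketch below is a paraphrase of that pre-blk-mq behaviour from memory, for illustration only; it is not part of this patch, and the internals it names (kblockd_workqueue, q->delay_work) are block-core private.

```c
/*
 * Paraphrased sketch (from memory) of the legacy blk_run_queue_async()
 * behaviour in the pre-blk-mq block core; illustration only, not this patch.
 */
void blk_run_queue_async(struct request_queue *q)
{
	/* Skip stopped or dead queues; otherwise kick kblockd to run it later. */
	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
```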