author    Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>  2019-08-22 21:45:14 -0700
committer Jens Axboe <axboe@kernel.dk>                     2019-08-23 06:58:04 -0600
commit    d4b186ed227b80334abf1fe2c918c0ddc4374f38 (patch)
tree      6d877f5efb7d24c3c4114713f60616c67384e9a8
parent    d1916c86ccdcb67996278a850a22762102702d85 (diff)
null_blk: move duplicate code to callers
This is a preparation patch which moves the duplicate code for the sectors
and nr_sectors calculations for bio vs request mode into their respective
callers (null_queue_bio(), null_queue_rq()). Now the core function only
deals with the respective actions and commands instead of having to
calculate the bio vs request operations and the different sector-related
variables. We also move the flush command handling to the top, which
significantly simplifies the rest of the code.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
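In distilled form, the pattern is: each caller derives the (sector,
nr_sectors, op) triple from the representation it already owns, and the
shared handler takes that triple as plain parameters, dispatching flush
before any sector math. Below is a minimal standalone C sketch of that
shape; mock_bio, mock_req, handle_cmd, queue_bio, and queue_rq are
hypothetical stand-ins for illustration, not the kernel's struct bio,
struct request, or this driver's functions.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;
enum req_opf { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_FLUSH };

/* Hypothetical stand-ins for the two I/O representations. */
struct mock_bio { sector_t bi_sector; uint32_t bi_size; enum req_opf op; };
struct mock_req { sector_t pos; uint32_t sectors; enum req_opf op; };

/*
 * Shared handler: receives (sector, nr_sectors, op) as parameters instead
 * of re-deriving them per queue mode, and short-circuits flush so the
 * remaining logic can assume a data command.
 */
static int handle_cmd(sector_t sector, sector_t nr_sectors, enum req_opf op)
{
	if (op == REQ_OP_FLUSH) {
		printf("flush\n");
		return 0;
	}
	printf("op=%d sector=%llu nr_sectors=%llu\n", (int)op,
	       (unsigned long long)sector, (unsigned long long)nr_sectors);
	return 0;
}

/* bio-mode caller: sector count is derived from a byte count (>> 9). */
static int queue_bio(const struct mock_bio *bio)
{
	return handle_cmd(bio->bi_sector, bio->bi_size >> 9, bio->op);
}

/* request-mode caller: the request already tracks sectors directly. */
static int queue_rq(const struct mock_req *rq)
{
	return handle_cmd(rq->pos, rq->sectors, rq->op);
}

int main(void)
{
	struct mock_bio bio = { .bi_sector = 2048, .bi_size = 4096,
				.op = REQ_OP_WRITE };
	struct mock_req req = { .op = REQ_OP_FLUSH };

	return queue_bio(&bio) | queue_rq(&req);
}

The kernel equivalents of the two derivations are bio->bi_iter.bi_sector /
bio_sectors() on the bio side and blk_rq_pos() / blk_rq_sectors() on the
request side, as the hunks below show.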
 drivers/block/null_blk_main.c | 66 +++++++++--------------
 1 file changed, 21 insertions(+), 45 deletions(-)
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 99c56d72ff78..7277f2db8ec9 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1133,7 +1133,8 @@ static void null_restart_queue_async(struct nullb *nullb)
 		blk_mq_start_stopped_hw_queues(q, true);
 }
 
-static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
+static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
+				    sector_t nr_sectors, enum req_opf op)
 {
 	struct nullb_device *dev = cmd->nq->dev;
 	struct nullb *nullb = dev->nullb;
@@ -1156,60 +1157,31 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
 		}
 	}
 
+	if (op == REQ_OP_FLUSH) {
+		cmd->error = errno_to_blk_status(null_handle_flush(nullb));
+		goto out;
+	}
 	if (nullb->dev->badblocks.shift != -1) {
 		int bad_sectors;
-		sector_t sector, size, first_bad;
-		bool is_flush = true;
-
-		if (dev->queue_mode == NULL_Q_BIO &&
-		    bio_op(cmd->bio) != REQ_OP_FLUSH) {
-			is_flush = false;
-			sector = cmd->bio->bi_iter.bi_sector;
-			size = bio_sectors(cmd->bio);
-		}
-		if (dev->queue_mode != NULL_Q_BIO &&
-		    req_op(cmd->rq) != REQ_OP_FLUSH) {
-			is_flush = false;
-			sector = blk_rq_pos(cmd->rq);
-			size = blk_rq_sectors(cmd->rq);
-		}
-		if (!is_flush && badblocks_check(&nullb->dev->badblocks, sector,
-				size, &first_bad, &bad_sectors)) {
+		sector_t first_bad;
+
+		if (badblocks_check(&nullb->dev->badblocks, sector, nr_sectors,
+				&first_bad, &bad_sectors)) {
 			cmd->error = BLK_STS_IOERR;
 			goto out;
 		}
 	}
 
 	if (dev->memory_backed) {
-		if (dev->queue_mode == NULL_Q_BIO) {
-			if (bio_op(cmd->bio) == REQ_OP_FLUSH)
-				err = null_handle_flush(nullb);
-			else
-				err = null_handle_bio(cmd);
-		} else {
-			if (req_op(cmd->rq) == REQ_OP_FLUSH)
-				err = null_handle_flush(nullb);
-			else
-				err = null_handle_rq(cmd);
-		}
+		if (dev->queue_mode == NULL_Q_BIO)
+			err = null_handle_bio(cmd);
+		else
+			err = null_handle_rq(cmd);
 	}
+
 	cmd->error = errno_to_blk_status(err);
 
 	if (!cmd->error && dev->zoned) {
-		sector_t sector;
-		unsigned int nr_sectors;
-		enum req_opf op;
-
-		if (dev->queue_mode == NULL_Q_BIO) {
-			op = bio_op(cmd->bio);
-			sector = cmd->bio->bi_iter.bi_sector;
-			nr_sectors = cmd->bio->bi_iter.bi_size >> 9;
-		} else {
-			op = req_op(cmd->rq);
-			sector = blk_rq_pos(cmd->rq);
-			nr_sectors = blk_rq_sectors(cmd->rq);
-		}
-
 		if (op == REQ_OP_WRITE)
 			null_zone_write(cmd, sector, nr_sectors);
 		else if (op == REQ_OP_ZONE_RESET)
@@ -1282,6 +1254,8 @@ static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
 
 static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
 {
+	sector_t sector = bio->bi_iter.bi_sector;
+	sector_t nr_sectors = bio_sectors(bio);
 	struct nullb *nullb = q->queuedata;
 	struct nullb_queue *nq = nullb_to_queue(nullb);
 	struct nullb_cmd *cmd;
@@ -1289,7 +1263,7 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
 	cmd = alloc_cmd(nq, 1);
 	cmd->bio = bio;
 
-	null_handle_cmd(cmd);
+	null_handle_cmd(cmd, sector, nr_sectors, bio_op(bio));
 	return BLK_QC_T_NONE;
 }
 
@@ -1323,6 +1297,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 {
 	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
 	struct nullb_queue *nq = hctx->driver_data;
+	sector_t nr_sectors = blk_rq_sectors(bd->rq);
+	sector_t sector = blk_rq_pos(bd->rq);
 
 	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
 
@@ -1351,7 +1327,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (should_timeout_request(bd->rq))
 		return BLK_STS_OK;
 
-	return null_handle_cmd(cmd);
+	return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
 }
 
 static const struct blk_mq_ops null_mq_ops = {