author     Christoph Hellwig <hch@lst.de>    2021-11-17 07:13:54 +0100
committer  Jens Axboe <axboe@kernel.dk>      2021-11-29 06:34:50 -0700
commit     79478bf9ea9fa48d30836afa796ac13d8a0f320b (patch)
tree       b9c057e680077735ad1d76972b5b03c3c9b08209
parent     d58071a8a76d779eedab38033ae4c821c30295a5 (diff)
download   linux-79478bf9ea9fa48d30836afa796ac13d8a0f320b.tar.bz2
block: move blk_rq_err_bytes to scsi
blk_rq_err_bytes is only used by the scsi midlayer, so move it there.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20211117061404.331732-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--  block/blk-core.c         41
-rw-r--r--  drivers/scsi/scsi_lib.c  42
-rw-r--r--  include/linux/blk-mq.h    3
3 files changed, 41 insertions(+), 45 deletions(-)
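
For context on what the moved helper computes before reading the diff: it walks the request's bio chain from the head and sums bytes only while each bio carries all of the failfast bits of the first bio, i.e. is at least as eager to fail. Below is a minimal userspace sketch of that walk; the stand-in types and the FAILFAST_MASK value are illustrative only, not the kernel's struct bio/struct request definitions.

    /*
     * Userspace sketch of the failure-boundary walk performed by
     * blk_rq_err_bytes()/scsi_rq_err_bytes(). Simplified stand-in
     * types; not the kernel's struct bio or struct request.
     */
    #include <stdio.h>

    #define FAILFAST_MASK 0x7u /* illustrative stand-in for REQ_FAILFAST_MASK */

    struct bio_stub {
            unsigned int opf;      /* stands in for bio->bi_opf */
            unsigned int size;     /* stands in for bio->bi_iter.bi_size */
            struct bio_stub *next; /* stands in for bio->bi_next */
    };

    /* Sum bytes from the head while each bio has all of ff's bits set. */
    static unsigned int err_bytes(const struct bio_stub *head, unsigned int ff)
    {
            unsigned int bytes = 0;
            const struct bio_stub *bio;

            for (bio = head; bio; bio = bio->next) {
                    if ((bio->opf & ff) != ff)
                            break;
                    bytes += bio->size;
            }
            return bytes;
    }

    int main(void)
    {
            struct bio_stub b2 = { .opf = 0x1, .size = 4096, .next = NULL };
            struct bio_stub b1 = { .opf = 0x3, .size = 8192, .next = &b2 };

            /*
             * The first bio's failfast bits are 0x3; b2 lacks bit 0x2,
             * so the walk stops there: only b1's 8192 bytes can be
             * failed without crossing into the part that must be
             * retried.
             */
            printf("%u\n", err_bytes(&b1, b1.opf & FAILFAST_MASK));
            return 0;
    }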
diff --git a/block/blk-core.c b/block/blk-core.c
index 1378d084c770..682b112f513f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1176,47 +1176,6 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
-/**
- * blk_rq_err_bytes - determine number of bytes till the next failure boundary
- * @rq: request to examine
- *
- * Description:
- * A request could be merge of IOs which require different failure
- * handling. This function determines the number of bytes which
- * can be failed from the beginning of the request without
- * crossing into area which need to be retried further.
- *
- * Return:
- * The number of bytes to fail.
- */
-unsigned int blk_rq_err_bytes(const struct request *rq)
-{
- unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
- unsigned int bytes = 0;
- struct bio *bio;
-
- if (!(rq->rq_flags & RQF_MIXED_MERGE))
- return blk_rq_bytes(rq);
-
- /*
- * Currently the only 'mixing' which can happen is between
- * different fastfail types. We can safely fail portions
- * which have all the failfast bits that the first one has -
- * the ones which are at least as eager to fail as the first
- * one.
- */
- for (bio = rq->bio; bio; bio = bio->bi_next) {
- if ((bio->bi_opf & ff) != ff)
- break;
- bytes += bio->bi_iter.bi_size;
- }
-
- /* this could lead to infinite loop */
- BUG_ON(blk_rq_bytes(rq) && !bytes);
- return bytes;
-}
-EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
-
static void update_io_ticks(struct block_device *part, unsigned long now,
bool end)
{
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 621d841d819a..5e8b5ecb3245 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -617,6 +617,46 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
}
}
+/**
+ * scsi_rq_err_bytes - determine number of bytes till the next failure boundary
+ * @rq: request to examine
+ *
+ * Description:
+ * A request could be merge of IOs which require different failure
+ * handling. This function determines the number of bytes which
+ * can be failed from the beginning of the request without
+ * crossing into area which need to be retried further.
+ *
+ * Return:
+ * The number of bytes to fail.
+ */
+static unsigned int scsi_rq_err_bytes(const struct request *rq)
+{
+ unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
+ unsigned int bytes = 0;
+ struct bio *bio;
+
+ if (!(rq->rq_flags & RQF_MIXED_MERGE))
+ return blk_rq_bytes(rq);
+
+ /*
+ * Currently the only 'mixing' which can happen is between
+ * different fastfail types. We can safely fail portions
+ * which have all the failfast bits that the first one has -
+ * the ones which are at least as eager to fail as the first
+ * one.
+ */
+ for (bio = rq->bio; bio; bio = bio->bi_next) {
+ if ((bio->bi_opf & ff) != ff)
+ break;
+ bytes += bio->bi_iter.bi_size;
+ }
+
+ /* this could lead to infinite loop */
+ BUG_ON(blk_rq_bytes(rq) && !bytes);
+ return bytes;
+}
+
/* Helper for scsi_io_completion() when "reprep" action required. */
static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
struct request_queue *q)
@@ -794,7 +834,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
scsi_print_command(cmd);
}
}
- if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req)))
+ if (!scsi_end_request(req, blk_stat, scsi_rq_err_bytes(req)))
return;
fallthrough;
case ACTION_REPREP:
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 2949d9ac7484..a78d9a0f2a1b 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -947,7 +947,6 @@ struct req_iterator {
* blk_rq_pos() : the current sector
* blk_rq_bytes() : bytes left in the entire request
* blk_rq_cur_bytes() : bytes left in the current segment
- * blk_rq_err_bytes() : bytes left till the next error boundary
* blk_rq_sectors() : sectors left in the entire request
* blk_rq_cur_sectors() : sectors left in the current segment
* blk_rq_stats_sectors() : sectors of the entire request used for stats
@@ -971,8 +970,6 @@ static inline int blk_rq_cur_bytes(const struct request *rq)
return bio_iovec(rq->bio).bv_len;
}
-unsigned int blk_rq_err_bytes(const struct request *rq);
-
static inline unsigned int blk_rq_sectors(const struct request *rq)
{
return blk_rq_bytes(rq) >> SECTOR_SHIFT;
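
A note on the BUG_ON(blk_rq_bytes(rq) && !bytes) carried over with the function: the completion path fails the returned byte count and retries the remainder, so a zero result while bytes are still pending would mean no forward progress. A minimal sketch of that progress argument, using a hypothetical end_portion() helper rather than the real scsi_end_request():

    /*
     * Sketch of why a zero err-bytes result with work pending would
     * loop forever. end_portion() is a hypothetical stand-in for the
     * real completion helper, not kernel code.
     */
    #include <stdbool.h>
    #include <stdio.h>

    /* Complete 'err' leading bytes; return true while work remains. */
    static bool end_portion(unsigned int *remaining, unsigned int err)
    {
            if (err > *remaining)
                    err = *remaining;
            *remaining -= err;
            return *remaining != 0;
    }

    int main(void)
    {
            unsigned int remaining = 12288;

            /*
             * With err == 0 and remaining > 0 this loop would never
             * terminate; the BUG_ON in the kernel helper rules that
             * state out before the caller can spin.
             */
            while (end_portion(&remaining, 8192))
                    printf("retrying, %u bytes left\n", remaining);
            printf("done\n");
            return 0;
    }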