author		Christoph Hellwig <hch@lst.de>	2021-10-12 13:12:21 +0200
committer	Jens Axboe <axboe@kernel.dk>	2021-10-18 06:17:36 -0600
commit		6ce913fe3eee14f40f778e85999c9e599dda8c6b (patch)
tree		9e49e5eff35e1d0272a92d31460613b0b306aec3
parent		d729cf9acb9311956c8a37113dcfa0160a2d9665 (diff)
download	linux-6ce913fe3eee14f40f778e85999c9e599dda8c6b.tar.bz2
block: rename REQ_HIPRI to REQ_POLLED
Unlike the RWF_HIPRI userspace ABI which is intentionally kept vague, the
bio flag is specific to the polling implementation, so rename and document
it properly.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Tested-by: Mark Wunderlich <mark.wunderlich@intel.com>
Link: https://lore.kernel.org/r/20211012111226.760968-12-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--	block/blk-core.c		| 2
-rw-r--r--	block/blk-merge.c		| 3
-rw-r--r--	block/blk-mq-debugfs.c		| 2
-rw-r--r--	block/blk-mq.c			| 4
-rw-r--r--	block/blk-mq.h			| 4
-rw-r--r--	block/blk.h			| 4
-rw-r--r--	drivers/nvme/host/core.c	| 2
-rw-r--r--	drivers/scsi/scsi_debug.c	| 10
-rw-r--r--	include/linux/bio.h		| 2
-rw-r--r--	include/linux/blk_types.h	| 4
-rw-r--r--	mm/page_io.c			| 2
11 files changed, 19 insertions(+), 20 deletions(-)
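Before the per-file hunks, a minimal sketch of how a submitter ends up with the
renamed flag; this is not part of the patch (the wrapper function name is made
up for illustration) and uses only helpers that appear in the hunks below:

static void example_prep_polled_bio(struct request_queue *q, struct bio *bio,
				    struct kiocb *kiocb)
{
	/*
	 * Mark the bio for polled completion: sets REQ_POLLED, plus
	 * REQ_NOWAIT for async kiocbs (see the bio_set_polled() hunk).
	 */
	bio_set_polled(bio, kiocb);

	/*
	 * Queues without QUEUE_FLAG_POLL fall back to IRQ completions,
	 * so the flag (and the per-cpu alloc cache) is dropped again,
	 * mirroring what submit_bio_checks() does in the first hunk.
	 */
	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio_clear_polled(bio);
}

On the request side, drivers such as nvme and scsi_debug now test
cmd_flags & REQ_POLLED where they previously tested REQ_HIPRI.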
diff --git a/block/blk-core.c b/block/blk-core.c
index 80fa7aa394c7..8eb0e08d5395 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -842,7 +842,7 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio)
}
if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
- bio_clear_hipri(bio);
+ bio_clear_polled(bio);
switch (bio_op(bio)) {
case REQ_OP_DISCARD:
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 9b77b4d6c2a1..f88d7863f997 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -318,8 +318,7 @@ split:
* iopoll in direct IO routine. Given performance gain of iopoll for
* big IO can be trival, disable iopoll when split needed.
*/
- bio_clear_hipri(bio);
-
+ bio_clear_polled(bio);
return bio_split(bio, sectors, GFP_NOIO, bs);
}
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 3daea160d670..409a8256d9ff 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -287,7 +287,7 @@ static const char *const cmd_flag_name[] = {
CMD_FLAG_NAME(BACKGROUND),
CMD_FLAG_NAME(NOWAIT),
CMD_FLAG_NAME(NOUNMAP),
- CMD_FLAG_NAME(HIPRI),
+ CMD_FLAG_NAME(POLLED),
};
#undef CMD_FLAG_NAME
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 97c24e461d0a..a34ffcf861c3 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -732,7 +732,7 @@ bool blk_mq_complete_request_remote(struct request *rq)
* For a polled request, always complete locallly, it's pointless
* to redirect the completion.
*/
- if (rq->cmd_flags & REQ_HIPRI)
+ if (rq->cmd_flags & REQ_POLLED)
return false;
if (blk_mq_complete_need_ipi(rq)) {
@@ -2278,7 +2278,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
rq_qos_throttle(q, bio);
- hipri = bio->bi_opf & REQ_HIPRI;
+ hipri = bio->bi_opf & REQ_POLLED;
plug = blk_mq_plug(q, bio);
if (plug && plug->cached_rq) {
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 5da970bb8865..a9fe01e14951 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -106,9 +106,9 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
enum hctx_type type = HCTX_TYPE_DEFAULT;
/*
- * The caller ensure that if REQ_HIPRI, poll must be enabled.
+ * The caller ensure that if REQ_POLLED, poll must be enabled.
*/
- if (flags & REQ_HIPRI)
+ if (flags & REQ_POLLED)
type = HCTX_TYPE_POLL;
else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
type = HCTX_TYPE_READ;
diff --git a/block/blk.h b/block/blk.h
index cab8d659d8a6..fa05d3f07976 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -416,11 +416,11 @@ extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;
-static inline void bio_clear_hipri(struct bio *bio)
+static inline void bio_clear_polled(struct bio *bio)
{
/* can't support alloc cache if we turn off polling */
bio_clear_flag(bio, BIO_PERCPU_CACHE);
- bio->bi_opf &= ~REQ_HIPRI;
+ bio->bi_opf &= ~REQ_POLLED;
}
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 3d444b13cd69..ae15cb714596 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -632,7 +632,7 @@ static inline void nvme_init_request(struct request *req,
req->cmd_flags |= REQ_FAILFAST_DRIVER;
if (req->mq_hctx->type == HCTX_TYPE_POLL)
- req->cmd_flags |= REQ_HIPRI;
+ req->cmd_flags |= REQ_POLLED;
nvme_clear_nvme_request(req);
memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 66f507469a31..40b473eea357 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -5384,7 +5384,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
{
bool new_sd_dp;
bool inject = false;
- bool hipri = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_HIPRI;
+ bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
int k, num_in_q, qdepth;
unsigned long iflags;
u64 ns_from_boot = 0;
@@ -5471,7 +5471,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (sdebug_host_max_queue)
sd_dp->hc_idx = get_tag(cmnd);
- if (hipri)
+ if (polled)
ns_from_boot = ktime_get_boottime_ns();
/* one of the resp_*() response functions is called here */
@@ -5531,7 +5531,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
kt -= d;
}
}
- if (hipri) {
+ if (polled) {
sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
spin_lock_irqsave(&sqp->qc_lock, iflags);
if (!sd_dp->init_poll) {
@@ -5562,7 +5562,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
atomic_read(&sdeb_inject_pending)))
sd_dp->aborted = true;
- if (hipri) {
+ if (polled) {
sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
spin_lock_irqsave(&sqp->qc_lock, iflags);
if (!sd_dp->init_poll) {
@@ -7331,7 +7331,7 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
if (kt_from_boot < sd_dp->cmpl_ts)
continue;
- } else /* ignoring non REQ_HIPRI requests */
+ } else /* ignoring non REQ_POLLED requests */
continue;
devip = (struct sdebug_dev_info *)scp->device->hostdata;
if (likely(devip))
diff --git a/include/linux/bio.h b/include/linux/bio.h
index d8d27742a75f..c7a2d880e927 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -706,7 +706,7 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
*/
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
- bio->bi_opf |= REQ_HIPRI;
+ bio->bi_opf |= REQ_POLLED;
if (!is_sync_kiocb(kiocb))
bio->bi_opf |= REQ_NOWAIT;
}
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 5017ba8fc539..f8b9fce68834 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -384,7 +384,7 @@ enum req_flag_bits {
/* command specific flags for REQ_OP_WRITE_ZEROES: */
__REQ_NOUNMAP, /* do not free blocks when zeroing */
- __REQ_HIPRI,
+ __REQ_POLLED, /* caller polls for completion using blk_poll */
/* for driver use */
__REQ_DRV,
@@ -409,7 +409,7 @@ enum req_flag_bits {
#define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT)
#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
-#define REQ_HIPRI (1ULL << __REQ_HIPRI)
+#define REQ_POLLED (1ULL << __REQ_POLLED)
#define REQ_DRV (1ULL << __REQ_DRV)
#define REQ_SWAP (1ULL << __REQ_SWAP)
diff --git a/mm/page_io.c b/mm/page_io.c
index 5d5543fcefa4..ed2eded74f3a 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -416,7 +416,7 @@ int swap_readpage(struct page *page, bool synchronous)
* attempt to access it in the page fault retry time check.
*/
if (synchronous) {
- bio->bi_opf |= REQ_HIPRI;
+ bio->bi_opf |= REQ_POLLED;
get_task_struct(current);
bio->bi_private = current;
}