From ae948fd6d02930a7e8e7c492d9627dfef18e7d7f Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 24 May 2022 14:15:28 +0200
Subject: blk-mq: remove __blk_execute_rq_nowait

We don't want to plug for synchronous execution where we immediately
wait for the request.  Once that is done, not a whole lot of code is
shared, so just remove __blk_execute_rq_nowait.

Signed-off-by: Christoph Hellwig
Reviewed-by: Keith Busch
Reviewed-by: Chaitanya Kulkarni
Link: https://lore.kernel.org/r/20220524121530.943123-2-hch@lst.de
Signed-off-by: Jens Axboe
---
 block/blk-mq.c | 69 +++++++++++++++++++++++++---------------------------------
 1 file changed, 30 insertions(+), 39 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index ae116b755648..31a89d1004b8 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1203,28 +1203,6 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
 	plug->rq_count++;
 }
 
-static void __blk_execute_rq_nowait(struct request *rq, bool at_head,
-		rq_end_io_fn *done, bool use_plug)
-{
-	WARN_ON(irqs_disabled());
-	WARN_ON(!blk_rq_is_passthrough(rq));
-
-	rq->end_io = done;
-
-	blk_account_io_start(rq);
-
-	if (use_plug && current->plug) {
-		blk_add_rq_to_plug(current->plug, rq);
-		return;
-	}
-	/*
-	 * don't check dying flag for MQ because the request won't
-	 * be reused after dying flag is set
-	 */
-	blk_mq_sched_insert_request(rq, at_head, true, false);
-}
-
-
 /**
  * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
  * @rq:		request to insert
@@ -1240,8 +1218,16 @@ static void __blk_execute_rq_nowait(struct request *rq, bool at_head,
  */
 void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
 {
-	__blk_execute_rq_nowait(rq, at_head, done, true);
+	WARN_ON(irqs_disabled());
+	WARN_ON(!blk_rq_is_passthrough(rq));
+
+	rq->end_io = done;
+
+	blk_account_io_start(rq);
+	if (current->plug)
+		blk_add_rq_to_plug(current->plug, rq);
+	else
+		blk_mq_sched_insert_request(rq, at_head, true, false);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
@@ -1277,27 +1263,32 @@ static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
 blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 {
 	DECLARE_COMPLETION_ONSTACK(wait);
-	unsigned long hang_check;
 
-	/*
-	 * iopoll requires request to be submitted to driver, so can't
-	 * use plug
-	 */
+	WARN_ON(irqs_disabled());
+	WARN_ON(!blk_rq_is_passthrough(rq));
+
 	rq->end_io_data = &wait;
-	__blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq,
-			!blk_rq_is_poll(rq));
+	rq->end_io = blk_end_sync_rq;
 
-	/* Prevent hang_check timer from firing at us during very long I/O */
-	hang_check = sysctl_hung_task_timeout_secs;
+	blk_account_io_start(rq);
+	blk_mq_sched_insert_request(rq, at_head, true, false);
 
-	if (blk_rq_is_poll(rq))
+	if (blk_rq_is_poll(rq)) {
 		blk_rq_poll_completion(rq, &wait);
-	else if (hang_check)
-		while (!wait_for_completion_io_timeout(&wait,
-				hang_check * (HZ/2)))
-			;
-	else
-		wait_for_completion_io(&wait);
+	} else {
+		/*
+		 * Prevent hang_check timer from firing at us during very long
+		 * I/O
+		 */
+		unsigned long hang_check = sysctl_hung_task_timeout_secs;
+
+		if (hang_check)
+			while (!wait_for_completion_io_timeout(&wait,
+					hang_check * (HZ/2)))
+				;
+		else
+			wait_for_completion_io(&wait);
+	}
 
 	return (blk_status_t)(uintptr_t)rq->end_io_data;
 }
-- 
cgit v1.2.3
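
For context, a minimal caller-side sketch of the synchronous path as it looks
after this change; the queue, opcode and error policy below are illustrative
assumptions, not code from the series.  The point is that blk_execute_rq()
now inserts the request and waits for it directly, while only
blk_execute_rq_nowait() keeps batching requests behind current->plug.

/* Illustrative sketch only -- not part of the patch. */
#include <linux/blk-mq.h>
#include <linux/err.h>

static blk_status_t example_sync_passthrough(struct request_queue *q)
{
	struct request *rq;
	blk_status_t status;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return BLK_STS_RESOURCE;	/* simplified error policy */

	/* a real driver would set up its passthrough command here */

	/*
	 * Inserts the request and sleeps until ->end_io fires; after this
	 * patch the synchronous path no longer goes through current->plug.
	 */
	status = blk_execute_rq(rq, false);

	blk_mq_free_request(rq);
	return status;
}
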
From 32ac5a9b8bc511edcd81f03c3e21753789475709 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 24 May 2022 14:15:29 +0200
Subject: blk-mq: avoid a mess of casts for blk_end_sync_rq

Instead of trying to cast a __bitwise 32-bit integer to a larger
integer and then a pointer, just allocate a struct with the
blk_status_t and the completion on stack and set the end_io_data to
that.  Use the opportunity to move the code to where it belongs and
drop the rather confusing comments.

Signed-off-by: Christoph Hellwig
Reviewed-by: Keith Busch
Reviewed-by: Chaitanya Kulkarni
Link: https://lore.kernel.org/r/20220524121530.943123-3-hch@lst.de
Signed-off-by: Jens Axboe
---
 block/blk-mq.c | 43 ++++++++++++++++++++-----------------------
 1 file changed, 20 insertions(+), 23 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 31a89d1004b8..28b3e6db9849 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1151,24 +1151,6 @@ void blk_mq_start_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_start_request);
 
-/**
- * blk_end_sync_rq - executes a completion event on a request
- * @rq: request to complete
- * @error: end I/O status of the request
- */
-static void blk_end_sync_rq(struct request *rq, blk_status_t error)
-{
-	struct completion *waiting = rq->end_io_data;
-
-	rq->end_io_data = (void *)(uintptr_t)error;
-
-	/*
-	 * complete last, if this is a stack request the process (and thus
-	 * the rq pointer) could be invalid right after this complete()
-	 */
-	complete(waiting);
-}
-
 /*
  * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
  * queues. This is important for md arrays to benefit from merging
@@ -1231,6 +1213,19 @@ void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
+struct blk_rq_wait {
+	struct completion done;
+	blk_status_t ret;
+};
+
+static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
+{
+	struct blk_rq_wait *wait = rq->end_io_data;
+
+	wait->ret = ret;
+	complete(&wait->done);
+}
+
 static bool blk_rq_is_poll(struct request *rq)
 {
 	if (!rq->mq_hctx)
@@ -1262,7 +1257,9 @@ static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
  */
 blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 {
-	DECLARE_COMPLETION_ONSTACK(wait);
+	struct blk_rq_wait wait = {
+		.done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
+	};
 
 	WARN_ON(irqs_disabled());
 	WARN_ON(!blk_rq_is_passthrough(rq));
@@ -1274,7 +1271,7 @@ blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 	blk_mq_sched_insert_request(rq, at_head, true, false);
 
 	if (blk_rq_is_poll(rq)) {
-		blk_rq_poll_completion(rq, &wait);
+		blk_rq_poll_completion(rq, &wait.done);
 	} else {
 		/*
 		 * Prevent hang_check timer from firing at us during very long
@@ -1283,14 +1280,14 @@ blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 		unsigned long hang_check = sysctl_hung_task_timeout_secs;
 
 		if (hang_check)
-			while (!wait_for_completion_io_timeout(&wait,
+			while (!wait_for_completion_io_timeout(&wait.done,
 					hang_check * (HZ/2)))
 				;
 		else
-			wait_for_completion_io(&wait);
+			wait_for_completion_io(&wait.done);
 	}
 
-	return (blk_status_t)(uintptr_t)rq->end_io_data;
+	return wait.ret;
 }
 EXPORT_SYMBOL(blk_execute_rq);
-- 
cgit v1.2.3
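
The pattern this patch switches to, keeping the status next to the completion
in one on-stack struct instead of round-tripping it through a cast void
pointer, is not specific to blk-mq.  A self-contained sketch of the same idea
follows; the workqueue and all names below are illustrative assumptions, not
code from the patch.

/* Illustrative sketch only -- not part of the patch. */
#include <linux/blk_types.h>
#include <linux/completion.h>
#include <linux/workqueue.h>

struct example_wait {
	struct work_struct work;
	struct completion done;
	blk_status_t ret;
};

static void example_work_fn(struct work_struct *work)
{
	struct example_wait *ew = container_of(work, struct example_wait, work);

	ew->ret = BLK_STS_OK;	/* record the result ... */
	complete(&ew->done);	/* ... then wake the on-stack waiter last */
}

static blk_status_t example_run_and_wait(void)
{
	struct example_wait ew = {
		.done = COMPLETION_INITIALIZER_ONSTACK(ew.done),
	};

	INIT_WORK_ONSTACK(&ew.work, example_work_fn);
	schedule_work(&ew.work);
	wait_for_completion(&ew.done);
	destroy_work_on_stack(&ew.work);
	return ew.ret;
}
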
From e2e530867245d051dc7800b0d07193b3e581f5b9 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 24 May 2022 14:15:30 +0200
Subject: blk-mq: remove the done argument to blk_execute_rq_nowait

Let the caller set it together with the end_io_data instead of passing
a pointless argument.  Note that the target code did in fact already
set it and then just overrode it again by calling
blk_execute_rq_nowait.

Signed-off-by: Christoph Hellwig
Reviewed-by: Keith Busch
Reviewed-by: Kanchan Joshi
Reviewed-by: Chaitanya Kulkarni
Link: https://lore.kernel.org/r/20220524121530.943123-4-hch@lst.de
Signed-off-by: Jens Axboe
---
 block/blk-mq.c                     |  5 +----
 drivers/block/sx8.c                |  4 ++--
 drivers/nvme/host/core.c           |  3 ++-
 drivers/nvme/host/ioctl.c          |  3 ++-
 drivers/nvme/host/pci.c            | 10 +++++++---
 drivers/nvme/target/passthru.c     |  3 ++-
 drivers/scsi/scsi_error.c          |  5 +++--
 drivers/scsi/sg.c                  |  3 ++-
 drivers/scsi/st.c                  |  3 ++-
 drivers/scsi/ufs/ufshpb.c          |  6 ++++--
 drivers/target/target_core_pscsi.c |  3 +--
 include/linux/blk-mq.h             |  3 +--
 12 files changed, 29 insertions(+), 22 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 28b3e6db9849..8e7860268f61 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1189,7 +1189,6 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
  * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
  * @rq:		request to insert
  * @at_head:    insert request at head or tail of queue
- * @done:	I/O completion handler
  *
  * Description:
  *    Insert a fully prepared request at the back of the I/O scheduler queue
@@ -1198,13 +1197,11 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
  * Note:
  *    This function will invoke @done directly if the queue is dead.
  */
-void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
+void blk_execute_rq_nowait(struct request *rq, bool at_head)
 {
 	WARN_ON(irqs_disabled());
 	WARN_ON(!blk_rq_is_passthrough(rq));
 
-	rq->end_io = done;
-
 	blk_account_io_start(rq);
 	if (current->plug)
 		blk_add_rq_to_plug(current->plug, rq);
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index b361583944b9..63b4f6431d2e 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -540,7 +540,7 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
 	spin_unlock_irq(&host->lock);
 
 	DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
-	blk_execute_rq_nowait(rq, true, NULL);
+	blk_execute_rq_nowait(rq, true);
 
 	return 0;
 
@@ -579,7 +579,7 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
 	crq->msg_bucket = (u32) rc;
 
 	DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
-	blk_execute_rq_nowait(rq, true, NULL);
+	blk_execute_rq_nowait(rq, true);
 
 	return 0;
 }
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 72f7c955c707..727c12cbe327 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1206,9 +1206,10 @@ static void nvme_keep_alive_work(struct work_struct *work)
 	nvme_init_request(rq, &ctrl->ka_cmd);
 
 	rq->timeout = ctrl->kato * HZ;
+	rq->end_io = nvme_keep_alive_end_io;
 	rq->end_io_data = ctrl;
 	rq->rq_flags |= RQF_QUIET;
-	blk_execute_rq_nowait(rq, false, nvme_keep_alive_end_io);
+	blk_execute_rq_nowait(rq, false);
 }
 
 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 096b1b47d750..a2e89db1cd63 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -453,6 +453,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 			blk_flags);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
+	req->end_io = nvme_uring_cmd_end_io;
 	req->end_io_data = ioucmd;
 
 	/* to free bio on completion, as req->bio will be null at that time */
@@ -461,7 +462,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	pdu->meta_buffer = nvme_to_user_ptr(d.metadata);
 	pdu->meta_len = d.metadata_len;
 
-	blk_execute_rq_nowait(req, 0, nvme_uring_cmd_end_io);
+	blk_execute_rq_nowait(req, false);
 	return -EIOCBQUEUED;
 }
 
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5a98a7de0964..0403b6d10bb4 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1438,9 +1438,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	}
 	nvme_init_request(abort_req, &cmd);
 
+	abort_req->end_io = abort_endio;
 	abort_req->end_io_data = NULL;
 	abort_req->rq_flags |= RQF_QUIET;
-	blk_execute_rq_nowait(abort_req, false, abort_endio);
+	blk_execute_rq_nowait(abort_req, false);
 
 	/*
 	 * The aborted req will be completed on receiving the abort req.
@@ -2485,12 +2486,15 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
 		return PTR_ERR(req);
 	nvme_init_request(req, &cmd);
 
+	if (opcode == nvme_admin_delete_cq)
+		req->end_io = nvme_del_cq_end;
+	else
+		req->end_io = nvme_del_queue_end;
 	req->end_io_data = nvmeq;
 
 	init_completion(&nvmeq->delete_done);
 	req->rq_flags |= RQF_QUIET;
-	blk_execute_rq_nowait(req, false, opcode == nvme_admin_delete_cq ?
-			nvme_del_cq_end : nvme_del_queue_end);
+	blk_execute_rq_nowait(req, false);
 	return 0;
 }
 
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 5247c24538eb..3cc4d6709c93 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -285,8 +285,9 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
 		req->p.rq = rq;
 		queue_work(nvmet_wq, &req->p.work);
 	} else {
+		rq->end_io = nvmet_passthru_req_done;
 		rq->end_io_data = req;
-		blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);
+		blk_execute_rq_nowait(rq, false);
 	}
 
 	if (ns)
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index cdaca13ac1f1..49ef864df581 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -2039,12 +2039,13 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
 	scmd->cmnd[4] = SCSI_REMOVAL_PREVENT;
 	scmd->cmnd[5] = 0;
 	scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
+	scmd->allowed = 5;
 
 	req->rq_flags |= RQF_QUIET;
 	req->timeout = 10 * HZ;
-	scmd->allowed = 5;
+	req->end_io = eh_lock_door_done;
 
-	blk_execute_rq_nowait(req, true, eh_lock_door_done);
+	blk_execute_rq_nowait(req, true);
 }
 
 /**
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index cbffa712b9f3..118c7b4a8af2 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -831,7 +831,8 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
 
 	srp->rq->timeout = timeout;
 	kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
-	blk_execute_rq_nowait(srp->rq, at_head, sg_rq_end_io);
+	srp->rq->end_io = sg_rq_end_io;
+	blk_execute_rq_nowait(srp->rq, at_head);
 	return 0;
 }
 
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 56a093a90b92..850172a2b8f1 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -579,9 +579,10 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
 	memcpy(scmd->cmnd, cmd, scmd->cmd_len);
 	req->timeout = timeout;
 	scmd->allowed = retries;
+	req->end_io = st_scsi_execute_end;
 	req->end_io_data = SRpnt;
 
-	blk_execute_rq_nowait(req, true, st_scsi_execute_end);
+	blk_execute_rq_nowait(req, true);
 	return 0;
 }
 
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index 8882b47f76d3..002c19c2b31f 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -671,11 +671,12 @@ static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
 
 	req->timeout = 0;
 	req->end_io_data = umap_req;
+	req->end_io = ufshpb_umap_req_compl_fn;
 
 	ufshpb_set_unmap_cmd(scmd->cmnd, rgn);
 	scmd->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
 
-	blk_execute_rq_nowait(req, true, ufshpb_umap_req_compl_fn);
+	blk_execute_rq_nowait(req, true);
 
 	hpb->stats.umap_req_cnt++;
 }
@@ -707,6 +708,7 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
 	blk_rq_append_bio(req, map_req->bio);
 
 	req->end_io_data = map_req;
+	req->end_io = ufshpb_map_req_compl_fn;
 
 	if (unlikely(last))
 		mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
@@ -716,7 +718,7 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
 		map_req->rb.srgn_idx, mem_size);
 	scmd->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
 
-	blk_execute_rq_nowait(req, true, ufshpb_map_req_compl_fn);
+	blk_execute_rq_nowait(req, true);
 
 	hpb->stats.map_req_cnt++;
 	return 0;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index bb3fb18b2316..e6a967ddc08c 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -972,8 +972,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
 
 	cmd->priv = scmd->cmnd;
 
-	blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG,
-			pscsi_req_done);
+	blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG);
 
 	return 0;
 
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 9f07061418db..e2d9daf7e8dd 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -969,8 +969,7 @@ int blk_rq_unmap_user(struct bio *);
 int blk_rq_map_kern(struct request_queue *, struct request *, void *,
 		unsigned int, gfp_t);
 int blk_rq_append_bio(struct request *rq, struct bio *bio);
-void blk_execute_rq_nowait(struct request *rq, bool at_head,
-		rq_end_io_fn *end_io);
+void blk_execute_rq_nowait(struct request *rq, bool at_head);
 blk_status_t blk_execute_rq(struct request *rq, bool at_head);
 
 struct req_iterator {
-- 
cgit v1.2.3
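
Taken together, the series leaves callers of the asynchronous path with a
two-step convention: fill in rq->end_io (and usually rq->end_io_data), then
call blk_execute_rq_nowait() with just the at_head flag.  A hypothetical
driver-side sketch of that convention; the queue, opcode and handler names
are assumptions, not code from the series.

/* Illustrative sketch only -- not part of the series. */
#include <linux/blk-mq.h>
#include <linux/err.h>

static void example_end_io(struct request *rq, blk_status_t status)
{
	/* per-command state was stashed in rq->end_io_data by the submitter */
	blk_mq_free_request(rq);
}

static int example_submit_async(struct request_queue *q, void *driver_data)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* set the completion hook before submitting ... */
	rq->end_io = example_end_io;
	rq->end_io_data = driver_data;

	/* ... blk_execute_rq_nowait() now only takes the at_head flag */
	blk_execute_rq_nowait(rq, false);
	return 0;
}
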