author      Jens Axboe <axboe@kernel.dk>    2018-12-07 08:40:13 -0700
committer   Jens Axboe <axboe@kernel.dk>    2018-12-07 08:40:13 -0700
commit      8b878ee247ef2691bd69e1bc3df5ae93738ea028 (patch)
tree        c3a037f0dfbaedc5da564b9ab74d5a3d572948d0
parent      c616cbee97aed4bc6178f148a7240206dcdb85a6 (diff)
parent      d7dcdf9d4e15189ecfda24cc87339a3425448d5c (diff)
download    linux-8b878ee247ef2691bd69e1bc3df5ae93738ea028.tar.bz2
Merge branch 'nvme-4.20' of git://git.infradead.org/nvme into for-linus
Pull NVMe fixes from Christoph.
* 'nvme-4.20' of git://git.infradead.org/nvme:
nvmet-rdma: fix response use after free
nvme: validate controller state before rescheduling keep alive
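
The keep-alive fix closes a race in nvme_keep_alive_end_io(): the completion handler used to re-arm ka_work unconditionally, so a keep-alive completing during controller teardown or reset could reschedule the work after the state machine had already moved on. The patch samples ctrl->state under ctrl->lock and only reschedules from LIVE or CONNECTING. Below is a minimal userspace sketch of that pattern (test state under a lock before re-arming deferred work), using pthreads and made-up names (struct ctrl, ctrl_state, schedule_keep_alive) rather than the kernel API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum ctrl_state { CTRL_LIVE, CTRL_CONNECTING, CTRL_DELETING };

struct ctrl {
	pthread_mutex_t lock;
	enum ctrl_state state;
};

/* Hypothetical stand-in for schedule_delayed_work(&ctrl->ka_work, ...). */
static void schedule_keep_alive(struct ctrl *ctrl)
{
	(void)ctrl;
	printf("keep-alive re-armed\n");
}

/* Completion handler: only re-arm if the controller is still live or
 * connecting. Sampling the state under the lock serializes the test
 * against a concurrent teardown changing the state. */
static void keep_alive_end_io(struct ctrl *ctrl)
{
	bool startka = false;

	pthread_mutex_lock(&ctrl->lock);
	if (ctrl->state == CTRL_LIVE || ctrl->state == CTRL_CONNECTING)
		startka = true;
	pthread_mutex_unlock(&ctrl->lock);

	if (startka)
		schedule_keep_alive(ctrl);
}

int main(void)
{
	struct ctrl c = { PTHREAD_MUTEX_INITIALIZER, CTRL_DELETING };

	keep_alive_end_io(&c);	/* no reschedule: controller is deleting */
	c.state = CTRL_LIVE;
	keep_alive_end_io(&c);	/* reschedules */
	return 0;
}

Testing the state without the lock would reintroduce the window the patch removes: the state could change between the check and the reschedule.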
-rw-r--r--  drivers/nvme/host/core.c    | 10 +++++++++-
-rw-r--r--  drivers/nvme/target/rdma.c  |  3 ++-
2 files changed, 11 insertions(+), 2 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 3cf1b773158e..962012135b62 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -831,6 +831,8 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
 {
 	struct nvme_ctrl *ctrl = rq->end_io_data;
+	unsigned long flags;
+	bool startka = false;
 
 	blk_mq_free_request(rq);
 
@@ -841,7 +843,13 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
 		return;
 	}
 
-	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+	spin_lock_irqsave(&ctrl->lock, flags);
+	if (ctrl->state == NVME_CTRL_LIVE ||
+	    ctrl->state == NVME_CTRL_CONNECTING)
+		startka = true;
+	spin_unlock_irqrestore(&ctrl->lock, flags);
+	if (startka)
+		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
 }
 
 static int nvme_keep_alive(struct nvme_ctrl *ctrl)
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 3f7971d3706d..583086dd9cb9 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -529,6 +529,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct nvmet_rdma_rsp *rsp =
 		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
+	struct nvmet_rdma_queue *queue = cq->cq_context;
 
 	nvmet_rdma_release_rsp(rsp);
 
@@ -536,7 +537,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
 		wc->status != IB_WC_WR_FLUSH_ERR)) {
 		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
 			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
-		nvmet_rdma_error_comp(rsp->queue);
+		nvmet_rdma_error_comp(queue);
 	}
 }
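
The nvmet-rdma change is a straightforward use-after-free fix: nvmet_rdma_release_rsp() can free the response, yet the error path then dereferenced rsp->queue. Caching the queue pointer from cq->cq_context before the release removes the dangling access. A minimal userspace sketch of the same discipline (copy out whatever you still need before freeing the containing object); struct response, struct queue and queue_report_error() are invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct queue { int id; };
struct response { struct queue *queue; };

static void queue_report_error(struct queue *q)
{
	fprintf(stderr, "error on queue %d\n", q->id);
}

int main(void)
{
	struct queue q = { .id = 1 };
	struct response *rsp = malloc(sizeof(*rsp));

	if (!rsp)
		return 1;
	rsp->queue = &q;

	/* Cache the pointer we still need BEFORE freeing the response. */
	struct queue *queue = rsp->queue;

	free(rsp);	/* plays the role of nvmet_rdma_release_rsp() */

	/* Safe: uses the cached pointer, not rsp->queue (freed memory). */
	queue_report_error(queue);
	return 0;
}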