author    Dan Carpenter <dan.carpenter@oracle.com>    2017-06-30 11:02:51 +0300
committer Martin K. Petersen <martin.petersen@oracle.com>    2017-07-01 17:08:41 -0400
commit    c4031db72b4fd2640ff3a7240701397abaacf048
tree      951c445b0cbb5f59d0a4f8baed380290fc1e3eb0 /drivers/scsi
parent    f557e32c0023ea0d67cdaa81b3398550dc1e4876
scsi: lpfc: spin_lock_irq() is not nestable
We're calling spin_lock_irq() multiple times while nesting these locks; the problem is that the first spin_unlock_irq() re-enables IRQs while the outer lock is still held, and we don't want that. Only the outermost lock should manage the IRQ state; the nested locks should use plain spin_lock()/spin_unlock().

Fixes: 966bb5b71196 ("scsi: lpfc: Break up IO ctx list into a separate get and put list")
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Acked-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
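For illustration, a minimal sketch of the broken and fixed nesting patterns, using two hypothetical locks (lock_a, lock_b) rather than the actual lpfc fields; only the outermost acquisition should disable and restore IRQs:

/*
 * Hypothetical illustration, not lpfc code: lock_b is taken while
 * lock_a is already held with IRQs disabled.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

static void broken_nesting(void)
{
        spin_lock_irq(&lock_a);         /* disables IRQs */
        spin_lock_irq(&lock_b);         /* redundant: IRQs already off */
        /* ... critical section ... */
        spin_unlock_irq(&lock_b);       /* BUG: re-enables IRQs while
                                         * lock_a is still held */
        spin_unlock_irq(&lock_a);
}

static void fixed_nesting(void)
{
        unsigned long flags;

        spin_lock_irqsave(&lock_a, flags);  /* outer lock owns IRQ state */
        spin_lock(&lock_b);                 /* nested: plain lock, IRQs stay off */
        /* ... critical section ... */
        spin_unlock(&lock_b);
        spin_unlock_irqrestore(&lock_a, flags);
}

This is the same transformation the patch applies below: the outer nvmet_ctx_get_lock keeps spin_lock_irqsave()/spin_unlock_irqrestore(), and every lock taken inside it drops the _irq suffix.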
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 7dc061a14f95..afc523209845 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -866,44 +866,44 @@ lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
 	unsigned long flags;
 	spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
-	spin_lock_irq(&phba->sli4_hba.nvmet_ctx_put_lock);
+	spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
 	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
 			&phba->sli4_hba.lpfc_nvmet_ctx_get_list, list) {
-		spin_lock_irq(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
 		list_del_init(&ctx_buf->list);
-		spin_unlock_irq(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
 		__lpfc_clear_active_sglq(phba,
 					 ctx_buf->sglq->sli4_lxritag);
 		ctx_buf->sglq->state = SGL_FREED;
 		ctx_buf->sglq->ndlp = NULL;
-		spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
+		spin_lock(&phba->sli4_hba.sgl_list_lock);
 		list_add_tail(&ctx_buf->sglq->list,
 			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
-		spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
+		spin_unlock(&phba->sli4_hba.sgl_list_lock);
 		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
 		kfree(ctx_buf->context);
 	}
 	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
 			&phba->sli4_hba.lpfc_nvmet_ctx_put_list, list) {
-		spin_lock_irq(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
 		list_del_init(&ctx_buf->list);
-		spin_unlock_irq(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
 		__lpfc_clear_active_sglq(phba,
 					 ctx_buf->sglq->sli4_lxritag);
 		ctx_buf->sglq->state = SGL_FREED;
 		ctx_buf->sglq->ndlp = NULL;
-		spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
+		spin_lock(&phba->sli4_hba.sgl_list_lock);
 		list_add_tail(&ctx_buf->sglq->list,
 			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
-		spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
+		spin_unlock(&phba->sli4_hba.sgl_list_lock);
 		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
 		kfree(ctx_buf->context);
 	}
-	spin_unlock_irq(&phba->sli4_hba.nvmet_ctx_put_lock);
+	spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
 	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
 }