author     Uma Krishnan <ukrishn@linux.vnet.ibm.com>          2017-06-21 21:14:30 -0500
committer  Martin K. Petersen <martin.petersen@oracle.com>    2017-06-26 15:01:08 -0400
commit     c2c292f45029a6850cd14c7c2fa4fc479b8f74aa (patch)
tree       e73b1f0e026dd547e426aa1f4019c5895ad85ade /drivers/scsi/cxlflash/vlun.c
parent     0b09e711189952ff9d411593a8d74ec12a956c57 (diff)
scsi: cxlflash: Handle AFU sync failures
AFU sync operations are not currently evaluated for failure. This is
acceptable for paths where there is not a dependency on the AFU being
consistent with the host. Examples include link reset events and LUN
cleanup operations. On paths where there is a dependency, such as a LUN
open, a sync failure should be acted upon.

In the event of AFU sync failures, either log or cleanup as appropriate
for operations that are dependent on a successful sync completion.

Update documentation to reflect behavior in the event of an AFU sync
failure.

Signed-off-by: Uma Krishnan <ukrishn@linux.vnet.ibm.com>
Acked-by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
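For readers unfamiliar with the pattern, the standalone sketch below illustrates
the error handling this change introduces: a caller performs its translation
update, issues an AFU sync, and converts a sync failure into -EAGAIN so the
operation can be retried. The afu_stub type and the *_stub helpers are
simplified stand-ins for illustration only, not the real cxlflash definitions.

/*
 * Illustrative sketch only (not part of the patch). The afu_stub type and
 * the *_stub helpers below are stand-ins, not real cxlflash code.
 */
#include <errno.h>
#include <stdio.h>

/* Stand-in for the AFU context; the real driver carries far more state. */
struct afu_stub {
	int fail_sync;	/* force a sync failure for demonstration */
};

/* Stand-in for cxlflash_afu_sync(): returns 0 on success, nonzero on failure. */
static int afu_sync_stub(struct afu_stub *afu)
{
	return afu->fail_sync ? -1 : 0;
}

/*
 * Pattern used on paths that depend on the AFU being consistent with the
 * host (e.g. resizing a virtual LUN's translation table): the sync result
 * is checked and a failure is surfaced as -EAGAIN so the caller may retry.
 */
static int update_translation_stub(struct afu_stub *afu)
{
	int rc;

	/* ... update host-side translation state here ... */

	rc = afu_sync_stub(afu);
	if (rc)
		rc = -EAGAIN;

	return rc;
}

int main(void)
{
	struct afu_stub ok = { .fail_sync = 0 };
	struct afu_stub bad = { .fail_sync = 1 };

	printf("sync ok:   rc=%d\n", update_translation_stub(&ok));
	printf("sync fail: rc=%d\n", update_translation_stub(&bad));
	return 0;
}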
Diffstat (limited to 'drivers/scsi/cxlflash/vlun.c')
 drivers/scsi/cxlflash/vlun.c | 88 ++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 64 insertions(+), 24 deletions(-)
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index 90b5c19f81f0..0800bcba5a00 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -594,7 +594,9 @@ static int grow_lxt(struct afu *afu,
 	rhte->lxt_cnt = my_new_size;
 	dma_wmb(); /* Make RHT entry's LXT table size update visible */
 
-	cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
+	rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
+	if (unlikely(rc))
+		rc = -EAGAIN;
 
 	/* free old lxt if reallocated */
 	if (lxt != lxt_old)
@@ -673,8 +675,11 @@ static int shrink_lxt(struct afu *afu,
 	rhte->lxt_start = lxt;
 	dma_wmb(); /* Make RHT entry's LXT table update visible */
 
-	if (needs_sync)
-		cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
+	if (needs_sync) {
+		rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
+		if (unlikely(rc))
+			rc = -EAGAIN;
+	}
 
 	if (needs_ws) {
 		/*
@@ -792,6 +797,21 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev,
 		rc = grow_lxt(afu, sdev, ctxid, rhndl, rhte, &new_size);
 	else if (new_size < rhte->lxt_cnt)
 		rc = shrink_lxt(afu, sdev, rhndl, rhte, ctxi, &new_size);
+	else {
+		/*
+		 * Rare case where there is already sufficient space, just
+		 * need to perform a translation sync with the AFU. This
+		 * scenario likely follows a previous sync failure during
+		 * a resize operation. Accordingly, perform the heavyweight
+		 * form of translation sync as it is unknown which type of
+		 * resize failed previously.
+		 */
+		rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
+		if (unlikely(rc)) {
+			rc = -EAGAIN;
+			goto out;
+		}
+	}
 
 	resize->hdr.return_flags = 0;
 	resize->last_lba = (new_size * MC_CHUNK_SIZE * gli->blk_len);
@@ -1084,10 +1104,13 @@ static int clone_lxt(struct afu *afu,
 {
 	struct cxlflash_cfg *cfg = afu->parent;
 	struct device *dev = &cfg->dev->dev;
-	struct sisl_lxt_entry *lxt;
+	struct sisl_lxt_entry *lxt = NULL;
+	bool locked = false;
 	u32 ngrps;
 	u64 aun;		/* chunk# allocated by block allocator */
-	int i, j;
+	int j;
+	int i = 0;
+	int rc = 0;
 
 	ngrps = LXT_NUM_GROUPS(rhte_src->lxt_cnt);
 
@@ -1095,33 +1118,29 @@ static int clone_lxt(struct afu *afu,
 		/* allocate new LXTs for clone */
 		lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
 				GFP_KERNEL);
-		if (unlikely(!lxt))
-			return -ENOMEM;
+		if (unlikely(!lxt)) {
+			rc = -ENOMEM;
+			goto out;
+		}
 
 		/* copy over */
 		memcpy(lxt, rhte_src->lxt_start,
 		       (sizeof(*lxt) * rhte_src->lxt_cnt));
 
-		/* clone the LBAs in block allocator via ref_cnt */
+		/* clone the LBAs in block allocator via ref_cnt, note that the
+		 * block allocator mutex must be held until it is established
+		 * that this routine will complete without the need for a
+		 * cleanup.
+		 */
 		mutex_lock(&blka->mutex);
+		locked = true;
 		for (i = 0; i < rhte_src->lxt_cnt; i++) {
 			aun = (lxt[i].rlba_base >> MC_CHUNK_SHIFT);
 			if (ba_clone(&blka->ba_lun, aun) == -1ULL) {
-				/* free the clones already made */
-				for (j = 0; j < i; j++) {
-					aun = (lxt[j].rlba_base >>
-					       MC_CHUNK_SHIFT);
-					ba_free(&blka->ba_lun, aun);
-				}
-
-				mutex_unlock(&blka->mutex);
-				kfree(lxt);
-				return -EIO;
+				rc = -EIO;
+				goto err;
 			}
 		}
-		mutex_unlock(&blka->mutex);
-	} else {
-		lxt = NULL;
 	}
 
 	/*
@@ -1136,10 +1155,31 @@ static int clone_lxt(struct afu *afu,
 	rhte->lxt_cnt = rhte_src->lxt_cnt;
 	dma_wmb(); /* Make RHT entry's LXT table size update visible */
 
-	cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
+	rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
+	if (unlikely(rc)) {
+		rc = -EAGAIN;
+		goto err2;
+	}
 
-	dev_dbg(dev, "%s: returning\n", __func__);
-	return 0;
+out:
+	if (locked)
+		mutex_unlock(&blka->mutex);
+	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
+	return rc;
+err2:
+	/* Reset the RHTE */
+	rhte->lxt_cnt = 0;
+	dma_wmb();
+	rhte->lxt_start = NULL;
+	dma_wmb();
+err:
+	/* free the clones already made */
+	for (j = 0; j < i; j++) {
+		aun = (lxt[j].rlba_base >> MC_CHUNK_SHIFT);
+		ba_free(&blka->ba_lun, aun);
+	}
+	kfree(lxt);
+	goto out;
 }
/**