author    Uma Krishnan <ukrishn@linux.vnet.ibm.com>    2017-06-21 21:13:32 -0500
committer Martin K. Petersen <martin.petersen@oracle.com>    2017-06-26 15:01:06 -0400
commit    66ea9bcc392017b6df465b6f5847f6eac966a801 (patch)
tree      e12f8654394261424af0ee67d573e5bafacf9fc8 /drivers/scsi/cxlflash
parent    d31b77911505bbd0cd06403bd5eff26d691ad62d (diff)
download  linux-66ea9bcc392017b6df465b6f5847f6eac966a801.tar.bz2
scsi: cxlflash: Combine the send queue locks
Currently there are separate spin locks for the two supported I/O queueing
models. This makes it difficult to serialize with paths outside the enqueue
path.

As a design simplification and to support serialization with enqueue
operations, move to only a single lock that is used for enqueueing
regardless of the queueing model.

Signed-off-by: Uma Krishnan <ukrishn@linux.vnet.ibm.com>
Acked-by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
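In sketch form, the change means both enqueue paths now serialize on the same
hwq->hsq_slock. The following is a minimal illustration only, not driver
source: the _sketch suffix marks the functions as hypothetical, and the
bodies are placeholder comments standing in for the real logic shown in the
diff below.

/*
 * Minimal sketch of the locking model after this change: both the
 * IOARRIN (MMIO) path and the SQ path take hsq_slock. Bodies are
 * simplified placeholders, not the cxlflash implementation.
 */
static int send_cmd_ioarrin_sketch(struct hwq *hwq, struct afu_cmd *cmd)
{
	ulong lock_flags;
	int rc = 0;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);	/* was rrin_slock */
	/* ... 'room' accounting, then MMIO write of &cmd->rcb ... */
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	return rc;
}

static int send_cmd_sq_sketch(struct hwq *hwq, struct afu_cmd *cmd)
{
	ulong lock_flags;
	int rc = 0;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);	/* same lock */
	/* ... hsq_credits check, copy command into the SQ ring ... */
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	return rc;
}

With one lock covering both models, code outside the enqueue path can exclude
submissions by taking hsq_slock, without needing to know which queueing model
the AFU is running in.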
Diffstat (limited to 'drivers/scsi/cxlflash')
-rw-r--r--  drivers/scsi/cxlflash/common.h |  3
-rw-r--r--  drivers/scsi/cxlflash/main.c   |  9
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 256af819377d..6fc32cfc6026 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -193,7 +193,7 @@ struct hwq {
u32 index; /* Index of this hwq */
atomic_t hsq_credits;
- spinlock_t hsq_slock;
+ spinlock_t hsq_slock; /* Hardware send queue lock */
struct sisl_ioarcb *hsq_start;
struct sisl_ioarcb *hsq_end;
struct sisl_ioarcb *hsq_curr;
@@ -204,7 +204,6 @@ struct hwq {
bool toggle;
s64 room;
- spinlock_t rrin_slock; /* Lock to rrin queuing and cmd_room updates */
struct irq_poll irqpoll;
} __aligned(cache_line_size());
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index a7d57c343492..64ea597ca98e 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -261,7 +261,7 @@ static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
* To avoid the performance penalty of MMIO, spread the update of
* 'room' over multiple commands.
*/
- spin_lock_irqsave(&hwq->rrin_slock, lock_flags);
+ spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
if (--hwq->room < 0) {
room = readq_be(&hwq->host_map->cmd_room);
if (room <= 0) {
@@ -277,7 +277,7 @@ static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
- spin_unlock_irqrestore(&hwq->rrin_slock, lock_flags);
+ spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
return rc;
@@ -1722,7 +1722,10 @@ static int start_afu(struct cxlflash_cfg *cfg)
hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
hwq->hrrq_curr = hwq->hrrq_start;
hwq->toggle = 1;
+
+ /* Initialize spin locks */
spin_lock_init(&hwq->hrrq_slock);
+ spin_lock_init(&hwq->hsq_slock);
/* Initialize SQ */
if (afu_is_sq_cmd_mode(afu)) {
@@ -1731,7 +1734,6 @@ static int start_afu(struct cxlflash_cfg *cfg)
hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
hwq->hsq_curr = hwq->hsq_start;
- spin_lock_init(&hwq->hsq_slock);
atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
}
@@ -1984,7 +1986,6 @@ static int init_afu(struct cxlflash_cfg *cfg)
for (i = 0; i < afu->num_hwqs; i++) {
hwq = get_hwq(afu, i);
- spin_lock_init(&hwq->rrin_slock);
hwq->room = readq_be(&hwq->host_map->cmd_room);
}
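The serialization benefit the commit message describes can be pictured with a
hedged sketch of an outside path; fence_hwq_sketch below is hypothetical and
not part of this patch, it only shows how a non-enqueue path could now fence
new submissions for a hwq by holding the single enqueue lock.

/*
 * Hypothetical illustration (not in this patch): with one enqueue
 * lock, any path can quiesce submissions for a hwq by holding
 * hsq_slock across its critical section.
 */
static void fence_hwq_sketch(struct hwq *hwq)
{
	ulong lock_flags;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	/* ... inspect or quiesce queue state with enqueue excluded ... */
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
}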