author     Douglas Anderson <dianders@chromium.org>     2020-07-09 07:51:44 -0700
committer  Bjorn Andersson <bjorn.andersson@linaro.org> 2020-07-13 16:17:13 -0700
commit     2124331411a1ee88437cdd426854253c173464ba (patch)
tree       5bf15e9a065ffb30585b6b367096e5a5bfb38ded /drivers/spi/spi-qcom-qspi.c
parent     f79a158d37c26015099f4a7fd7d6592bb2ad3054 (diff)
spi: spi-qcom-qspi: Avoid clock setting if not needed
As per recent changes to spi-qcom-qspi, when we set the clock we now also call
into the interconnect framework and the OPP API. Those are expensive
operations, so let's avoid calling them where possible. This has a big impact
on getting transfer rates back up to where they were (or maybe slightly
better) before those patches landed.

Fixes: cff80645d6d3 ("spi: spi-qcom-qspi: Add interconnect support")
Signed-off-by: Douglas Anderson <dianders@chromium.org>
Acked-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Rajendra Nayak <rnayak@codeaurora.org>
Tested-by: Rajendra Nayak <rnayak@codeaurora.org>
Reviewed-by: Mukesh Kumar Savaliya <msavaliy@codeaurora.org>
Reviewed-by: Akash Asthana <akashast@codeaurora.org>
Link: https://lore.kernel.org/r/20200709075113.v2.1.Ia7cb4f41ce93d37d0a764b47c8a453ce9e9c70ef@changeid
Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
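The heart of the change is a caching guard: remember the last programmed
speed and return early, before touching the OPP and interconnect paths, when
the requested speed is unchanged. The standalone C sketch below illustrates
the same pattern outside the kernel; expensive_set_rate(), struct fake_ctrl
and set_speed() are illustrative stand-ins, not symbols from the driver.

    #include <stdio.h>

    /* Stand-in for the costly OPP/interconnect work done on a real speed change. */
    static int expensive_set_rate(unsigned long rate_hz)
    {
            printf("programming clock to %lu Hz (expensive path)\n", rate_hz);
            return 0;
    }

    struct fake_ctrl {
            unsigned long last_speed;   /* last programmed speed, 0 = never set */
    };

    /* Skip the expensive path entirely when the speed has not changed. */
    static int set_speed(struct fake_ctrl *ctrl, unsigned long speed_hz)
    {
            int ret;

            if (speed_hz == ctrl->last_speed)
                    return 0;

            /* As in the driver, the core clock runs at 4x the transfer clock. */
            ret = expensive_set_rate(speed_hz * 4);
            if (ret)
                    return ret;

            ctrl->last_speed = speed_hz;
            return 0;
    }

    int main(void)
    {
            struct fake_ctrl ctrl = { 0 };

            set_speed(&ctrl, 25000000);  /* programs the clock */
            set_speed(&ctrl, 25000000);  /* cache hit, expensive path skipped */
            set_speed(&ctrl, 50000000);  /* programs the clock again */
            return 0;
    }

Note how the runtime-resume hunk in the diff below re-programs
ctrl->last_speed * 4 after re-enabling the clocks, so the cached value stays
valid across runtime suspend/resume.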
Diffstat (limited to 'drivers/spi/spi-qcom-qspi.c')
-rw-r--r--   drivers/spi/spi-qcom-qspi.c   41
1 file changed, 31 insertions(+), 10 deletions(-)
diff --git a/drivers/spi/spi-qcom-qspi.c b/drivers/spi/spi-qcom-qspi.c
index 18a59aa23ef8..8fedc605ab7f 100644
--- a/drivers/spi/spi-qcom-qspi.c
+++ b/drivers/spi/spi-qcom-qspi.c
@@ -144,6 +144,7 @@ struct qcom_qspi {
struct icc_path *icc_path_cpu_to_qspi;
struct opp_table *opp_table;
bool has_opp_table;
+ unsigned long last_speed;
/* Lock to protect data accessed by IRQs */
spinlock_t lock;
};
@@ -226,19 +227,13 @@ static void qcom_qspi_handle_err(struct spi_master *master,
spin_unlock_irqrestore(&ctrl->lock, flags);
}
-static int qcom_qspi_transfer_one(struct spi_master *master,
- struct spi_device *slv,
- struct spi_transfer *xfer)
+static int qcom_qspi_set_speed(struct qcom_qspi *ctrl, unsigned long speed_hz)
{
- struct qcom_qspi *ctrl = spi_master_get_devdata(master);
int ret;
- unsigned long speed_hz;
- unsigned long flags;
unsigned int avg_bw_cpu;
- speed_hz = slv->max_speed_hz;
- if (xfer->speed_hz)
- speed_hz = xfer->speed_hz;
+ if (speed_hz == ctrl->last_speed)
+ return 0;
/* In regular operation (SBL_EN=1) core must be 4x transfer clock */
ret = dev_pm_opp_set_rate(ctrl->dev, speed_hz * 4);
@@ -259,6 +254,28 @@ static int qcom_qspi_transfer_one(struct spi_master *master,
return ret;
}
+ ctrl->last_speed = speed_hz;
+
+ return 0;
+}
+
+static int qcom_qspi_transfer_one(struct spi_master *master,
+ struct spi_device *slv,
+ struct spi_transfer *xfer)
+{
+ struct qcom_qspi *ctrl = spi_master_get_devdata(master);
+ int ret;
+ unsigned long speed_hz;
+ unsigned long flags;
+
+ speed_hz = slv->max_speed_hz;
+ if (xfer->speed_hz)
+ speed_hz = xfer->speed_hz;
+
+ ret = qcom_qspi_set_speed(ctrl, speed_hz);
+ if (ret)
+ return ret;
+
spin_lock_irqsave(&ctrl->lock, flags);
/* We are half duplex, so either rx or tx will be set */
@@ -602,7 +619,11 @@ static int __maybe_unused qcom_qspi_runtime_resume(struct device *dev)
return ret;
}
- return clk_bulk_prepare_enable(QSPI_NUM_CLKS, ctrl->clks);
+ ret = clk_bulk_prepare_enable(QSPI_NUM_CLKS, ctrl->clks);
+ if (ret)
+ return ret;
+
+ return dev_pm_opp_set_rate(dev, ctrl->last_speed * 4);
}
static int __maybe_unused qcom_qspi_suspend(struct device *dev)