Diffstat (limited to 'drivers/bus/mhi')
-rw-r--r--   drivers/bus/mhi/core/boot.c     | 152
-rw-r--r--   drivers/bus/mhi/core/init.c     |  39
-rw-r--r--   drivers/bus/mhi/core/internal.h |   2
-rw-r--r--   drivers/bus/mhi/core/pm.c       |  32
4 files changed, 225 insertions(+), 0 deletions(-)
diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c
index 26422a7da35b..220faa886eb3 100644
--- a/drivers/bus/mhi/core/boot.c
+++ b/drivers/bus/mhi/core/boot.c
@@ -18,6 +18,158 @@
#include <linux/wait.h>
#include "internal.h"
+/* Setup RDDM vector table for RDDM transfer and program RXVEC */
+void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+ struct image_info *img_info)
+{
+ struct mhi_buf *mhi_buf = img_info->mhi_buf;
+ struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
+ void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 sequence_id;
+ unsigned int i;
+
+ for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
+ bhi_vec->dma_addr = mhi_buf->dma_addr;
+ bhi_vec->size = mhi_buf->len;
+ }
+
+ dev_dbg(dev, "BHIe programming for RDDM\n");
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
+ upper_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
+ lower_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
+ sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
+
+ if (unlikely(!sequence_id))
+ sequence_id = 1;
+
+ mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
+ BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
+ sequence_id);
+
+ dev_dbg(dev, "Address: %p and len: 0x%lx sequence: %u\n",
+ &mhi_buf->dma_addr, mhi_buf->len, sequence_id);
+}
+
+/* Collect RDDM buffer during kernel panic */
+static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
+{
+ int ret;
+ u32 rx_status;
+ enum mhi_ee_type ee;
+ const u32 delayus = 2000;
+ u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
+ const u32 rddm_timeout_us = 200000;
+ int rddm_retry = rddm_timeout_us / delayus;
+ void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ dev_dbg(dev, "Entered with pm_state:%s dev_state:%s ee:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+ /*
+ * This should only be executing during a kernel panic; we expect all
+ * other cores to shut down while we're collecting the RDDM buffer.
+ * After returning from this function, we expect the device to reset.
+ *
+ * Normally, we read/write pm_state only after grabbing the pm_lock.
+ * Since we're in a panic, skip it here. Also, there is no guarantee
+ * that this state change will take effect, since we're setting it
+ * without grabbing the pm_lock.
+ */
+ mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
+ /* the update should take effect immediately */
+ smp_wmb();
+
+ /*
+ * Make sure the device is not already in RDDM. If the device asserted
+ * and a kernel panic followed, it will already be in RDDM; in that
+ * case, do not trigger SYS ERR again and proceed with waiting for
+ * image download completion.
+ */
+ ee = mhi_get_exec_env(mhi_cntrl);
+ if (ee != MHI_EE_RDDM) {
+ dev_dbg(dev, "Trigger device into RDDM mode using SYS ERR\n");
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+
+ dev_dbg(dev, "Waiting for device to enter RDDM\n");
+ while (rddm_retry--) {
+ ee = mhi_get_exec_env(mhi_cntrl);
+ if (ee == MHI_EE_RDDM)
+ break;
+
+ udelay(delayus);
+ }
+
+ if (rddm_retry <= 0) {
+ /* Do a hardware reset to force the device into RDDM */
+ dev_dbg(dev,
+ "Did not enter RDDM, do a host req reset\n");
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->regs,
+ MHI_SOC_RESET_REQ_OFFSET,
+ MHI_SOC_RESET_REQ);
+ udelay(delayus);
+ }
+
+ ee = mhi_get_exec_env(mhi_cntrl);
+ }
+
+ dev_dbg(dev, "Waiting for image download completion, current EE: %s\n",
+ TO_MHI_EXEC_STR(ee));
+
+ while (retry--) {
+ ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
+ BHIE_RXVECSTATUS_STATUS_BMSK,
+ BHIE_RXVECSTATUS_STATUS_SHFT,
+ &rx_status);
+ if (ret)
+ return -EIO;
+
+ if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL)
+ return 0;
+
+ udelay(delayus);
+ }
+
+ ee = mhi_get_exec_env(mhi_cntrl);
+ ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
+
+ dev_err(dev, "Did not complete RDDM transfer\n");
+ dev_err(dev, "Current EE: %s\n", TO_MHI_EXEC_STR(ee));
+ dev_err(dev, "RXVEC_STATUS: 0x%x\n", rx_status);
+
+ return -EIO;
+}
+
+/* Download RDDM image from device */
+int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
+{
+ void __iomem *base = mhi_cntrl->bhie;
+ u32 rx_status;
+
+ if (in_panic)
+ return __mhi_download_rddm_in_panic(mhi_cntrl);
+
+ /* Wait for the image download to complete */
+ wait_event_timeout(mhi_cntrl->state_event,
+ mhi_read_reg_field(mhi_cntrl, base,
+ BHIE_RXVECSTATUS_OFFS,
+ BHIE_RXVECSTATUS_STATUS_BMSK,
+ BHIE_RXVECSTATUS_STATUS_SHFT,
+ &rx_status) || rx_status,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
+}
+EXPORT_SYMBOL_GPL(mhi_download_rddm_img);
+
static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
const struct mhi_buf *mhi_buf)
{
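
As an aside for readers of this hunk: a hedged sketch of how a controller driver might wire the in-panic path into a panic notifier. Only mhi_download_rddm_img() comes from this patch; the example_mhi_* names, the global controller pointer, and the registration comment are illustrative assumptions, and the public declaration is assumed to live in <linux/mhi.h> (the header change sits outside this path-limited diff).

#include <linux/mhi.h>
#include <linux/notifier.h>

/* Hypothetical controller pointer; a real driver would keep this in its
 * own private data rather than in a global.
 */
static struct mhi_controller *example_mhi_cntrl;

/* Panic notifier callback: collect the RDDM image via the polling,
 * non-sleeping path added above.
 */
static int example_mhi_panic_handler(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	if (example_mhi_cntrl && example_mhi_cntrl->rddm_image)
		mhi_download_rddm_img(example_mhi_cntrl, true);

	return NOTIFY_DONE;
}

static struct notifier_block example_mhi_panic_nb = {
	.notifier_call = example_mhi_panic_handler,
};

/* Registered from probe, e.g. (the declaring header for panic_notifier_list
 * varies by kernel version):
 *	atomic_notifier_chain_register(&panic_notifier_list,
 *				       &example_mhi_panic_nb);
 */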
diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
index fffcbcdeb6c6..e81cdd0207e2 100644
--- a/drivers/bus/mhi/core/init.c
+++ b/drivers/bus/mhi/core/init.c
@@ -843,6 +843,8 @@ EXPORT_SYMBOL_GPL(mhi_unregister_controller);
int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 bhie_off;
int ret;
mutex_lock(&mhi_cntrl->pm_mutex);
@@ -851,12 +853,44 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
if (ret)
goto error_dev_ctxt;
+ /*
+ * Allocate the RDDM table if specified; it is used for debugging purposes
+ */
+ if (mhi_cntrl->rddm_size) {
+ mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
+ mhi_cntrl->rddm_size);
+
+ /*
+ * This controller supports RDDM, so we need to manually clear
+ * BHIE RX registers since POR values are undefined.
+ */
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
+ &bhie_off);
+ if (ret) {
+ dev_err(dev, "Error getting BHIE offset\n");
+ goto bhie_error;
+ }
+
+ memset_io(mhi_cntrl->regs + bhie_off + BHIE_RXVECADDR_LOW_OFFS,
+ 0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
+ 4);
+
+ if (mhi_cntrl->rddm_image)
+ mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
+ }
+
mhi_cntrl->pre_init = true;
mutex_unlock(&mhi_cntrl->pm_mutex);
return 0;
+bhie_error:
+ if (mhi_cntrl->rddm_image) {
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
+ mhi_cntrl->rddm_image = NULL;
+ }
+
error_dev_ctxt:
mutex_unlock(&mhi_cntrl->pm_mutex);
@@ -871,6 +905,11 @@ void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
mhi_cntrl->fbc_image = NULL;
}
+ if (mhi_cntrl->rddm_image) {
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
+ mhi_cntrl->rddm_image = NULL;
+ }
+
mhi_deinit_dev_ctxt(mhi_cntrl);
mhi_cntrl->pre_init = false;
}
diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h
index 0f6246c6162e..e1d3f2ca4922 100644
--- a/drivers/bus/mhi/core/internal.h
+++ b/drivers/bus/mhi/core/internal.h
@@ -614,6 +614,8 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl);
void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl);
int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
+void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+ struct image_info *img_info);
/* Memory allocation methods */
static inline void *mhi_alloc_coherent(struct mhi_controller *mhi_cntrl,
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
index bfe0371f6e75..2ba2f6aba9d5 100644
--- a/drivers/bus/mhi/core/pm.c
+++ b/drivers/bus/mhi/core/pm.c
@@ -450,6 +450,16 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
/* We must notify MHI control driver so it can clean up first */
if (transition_state == MHI_PM_SYS_ERR_PROCESS) {
+ /*
+ * If the controller supports RDDM, we do not process the
+ * SYS error state; instead, we jump directly to the RDDM
+ * state
+ */
+ if (mhi_cntrl->rddm_image) {
+ dev_dbg(dev,
+ "Controller supports RDDM, so skip SYS_ERR\n");
+ return;
+ }
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);
}
@@ -895,3 +905,25 @@ int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
return (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -EIO;
}
EXPORT_SYMBOL(mhi_sync_power_up);
+
+int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ /* Check if device is already in RDDM */
+ if (mhi_cntrl->ee == MHI_EE_RDDM)
+ return 0;
+
+ dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+
+ /* Wait for RDDM event */
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->ee == MHI_EE_RDDM,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ ret = ret ? 0 : -EIO;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);
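
To round off, a hedged sketch of combining the two exported helpers outside of a panic, e.g. after a controller driver detects a firmware hang; example_mhi_collect_dump() is an illustrative name, and the declarations are again assumed to be exported through <linux/mhi.h>.

#include <linux/mhi.h>

/* Force the device into RDDM and collect the dump from process context */
static int example_mhi_collect_dump(struct mhi_controller *mhi_cntrl)
{
	int ret;

	/* Trigger SYS_ERR and wait up to timeout_ms for the RDDM EE */
	ret = mhi_force_rddm_mode(mhi_cntrl);
	if (ret)
		return ret;

	/* Not in panic context, so take the sleeping wait_event_timeout() path */
	return mhi_download_rddm_img(mhi_cntrl, false);
}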