author     Linus Torvalds <torvalds@linux-foundation.org>  2020-06-07 10:59:32 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-06-07 10:59:32 -0700
commit     9aa900c8094dba7a60dc805ecec1e9f720744ba1 (patch)
tree       3cc09a579f8ea6d3a182076ba722f7c1648e682d /drivers/bus
parent     f558b8364e19f9222e7976c64e9367f66bab02cc (diff)
parent     05c8a4fc44a916dd897769ca69b42381f9177ec4 (diff)
download   linux-9aa900c8094dba7a60dc805ecec1e9f720744ba1.tar.bz2
Merge tag 'char-misc-5.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver updates from Greg KH:
"Here is the large set of char/misc driver patches for 5.8-rc1
Included in here are:
- habanalabs driver updates, loads
- mhi bus driver updates
- extcon driver updates
- clk driver updates (approved by the clock maintainer)
- firmware driver updates
- fpga driver updates
- gnss driver updates
- coresight driver updates
- interconnect driver updates
- parport driver updates (it's still alive!)
- nvmem driver updates
- soundwire driver updates
- visorbus driver updates
- w1 driver updates
- various misc driver updates
In short, loads of different driver subsystem updates along with the
drivers as well.
All have been in linux-next for a while with no reported issues"
* tag 'char-misc-5.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (233 commits)
habanalabs: correctly cast u64 to void*
habanalabs: initialize variable to default value
extcon: arizona: Fix runtime PM imbalance on error
extcon: max14577: Add proper dt-compatible strings
extcon: adc-jack: Fix an error handling path in 'adc_jack_probe()'
extcon: remove redundant assignment to variable idx
w1: omap-hdq: print dev_err if irq flags are not cleared
w1: omap-hdq: fix interrupt handling which did show spurious timeouts
w1: omap-hdq: fix return value to be -1 if there is a timeout
w1: omap-hdq: cleanup to add missing newline for some dev_dbg
/dev/mem: Revoke mappings when a driver claims the region
misc: xilinx-sdfec: convert get_user_pages() --> pin_user_pages()
misc: xilinx-sdfec: cleanup return value in xsdfec_table_write()
misc: xilinx-sdfec: improve get_user_pages_fast() error handling
nvmem: qfprom: remove incorrect write support
habanalabs: handle MMU cache invalidation timeout
habanalabs: don't allow hard reset with open processes
habanalabs: GAUDI does not support soft-reset
habanalabs: add print for soft reset due to event
habanalabs: improve MMU cache invalidation code
...
Diffstat (limited to 'drivers/bus')

-rw-r--r--  drivers/bus/mhi/core/boot.c      |  75
-rw-r--r--  drivers/bus/mhi/core/init.c      |   8
-rw-r--r--  drivers/bus/mhi/core/internal.h  |   9
-rw-r--r--  drivers/bus/mhi/core/main.c      | 197
-rw-r--r--  drivers/bus/mhi/core/pm.c        | 229

5 files changed, 362 insertions(+), 156 deletions(-)
diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c
index ebad5eb48e5a..0b38014d040e 100644
--- a/drivers/bus/mhi/core/boot.c
+++ b/drivers/bus/mhi/core/boot.c
@@ -43,10 +43,7 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
             lower_32_bits(mhi_buf->dma_addr));
 
     mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
-    sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
-
-    if (unlikely(!sequence_id))
-        sequence_id = 1;
+    sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK);
 
     mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
                 BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
@@ -121,7 +118,8 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
         ee = mhi_get_exec_env(mhi_cntrl);
     }
 
-    dev_dbg(dev, "Waiting for image download completion, current EE: %s\n",
+    dev_dbg(dev,
+        "Waiting for RDDM image download via BHIe, current EE:%s\n",
         TO_MHI_EXEC_STR(ee));
 
     while (retry--) {
@@ -152,11 +150,14 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
 int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
 {
     void __iomem *base = mhi_cntrl->bhie;
+    struct device *dev = &mhi_cntrl->mhi_dev->dev;
     u32 rx_status;
 
     if (in_panic)
         return __mhi_download_rddm_in_panic(mhi_cntrl);
 
+    dev_dbg(dev, "Waiting for RDDM image download via BHIe\n");
+
     /* Wait for the image download to complete */
     wait_event_timeout(mhi_cntrl->state_event,
                mhi_read_reg_field(mhi_cntrl, base,
@@ -174,8 +175,10 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
                 const struct mhi_buf *mhi_buf)
 {
     void __iomem *base = mhi_cntrl->bhie;
+    struct device *dev = &mhi_cntrl->mhi_dev->dev;
     rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
     u32 tx_status, sequence_id;
+    int ret;
 
     read_lock_bh(pm_lock);
     if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
@@ -183,6 +186,9 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
         return -EIO;
     }
 
+    sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK);
+    dev_dbg(dev, "Starting AMSS download via BHIe. Sequence ID:%u\n",
+        sequence_id);
     mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
               upper_32_bits(mhi_buf->dma_addr));
 
@@ -191,26 +197,25 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
 
     mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
 
-    sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
     mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
                 BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
                 sequence_id);
     read_unlock_bh(pm_lock);
 
     /* Wait for the image download to complete */
-    wait_event_timeout(mhi_cntrl->state_event,
-               MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
-               mhi_read_reg_field(mhi_cntrl, base,
-                          BHIE_TXVECSTATUS_OFFS,
-                          BHIE_TXVECSTATUS_STATUS_BMSK,
-                          BHIE_TXVECSTATUS_STATUS_SHFT,
-                          &tx_status) || tx_status,
-               msecs_to_jiffies(mhi_cntrl->timeout_ms));
-
-    if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+    ret = wait_event_timeout(mhi_cntrl->state_event,
+                 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+                 mhi_read_reg_field(mhi_cntrl, base,
+                            BHIE_TXVECSTATUS_OFFS,
+                            BHIE_TXVECSTATUS_STATUS_BMSK,
+                            BHIE_TXVECSTATUS_STATUS_SHFT,
+                            &tx_status) || tx_status,
+                 msecs_to_jiffies(mhi_cntrl->timeout_ms));
+    if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+        tx_status != BHIE_TXVECSTATUS_STATUS_XFER_COMPL)
         return -EIO;
 
-    return (tx_status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
+    return (!ret) ? -ETIMEDOUT : 0;
 }
 
 static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
@@ -239,14 +244,15 @@ static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
         goto invalid_pm_state;
     }
 
-    dev_dbg(dev, "Starting SBL download via BHI\n");
+    session_id = MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK);
+    dev_dbg(dev, "Starting SBL download via BHI. Session ID:%u\n",
+        session_id);
     mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
     mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
               upper_32_bits(dma_addr));
     mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
               lower_32_bits(dma_addr));
     mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
-    session_id = prandom_u32() & BHI_TXDB_SEQNUM_BMSK;
     mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id);
     read_unlock_bh(pm_lock);
 
@@ -377,30 +383,18 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
     }
 }
 
-void mhi_fw_load_worker(struct work_struct *work)
+void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
 {
-    struct mhi_controller *mhi_cntrl;
     const struct firmware *firmware = NULL;
     struct image_info *image_info;
-    struct device *dev;
+    struct device *dev = &mhi_cntrl->mhi_dev->dev;
     const char *fw_name;
     void *buf;
     dma_addr_t dma_addr;
     size_t size;
     int ret;
 
-    mhi_cntrl = container_of(work, struct mhi_controller, fw_worker);
-    dev = &mhi_cntrl->mhi_dev->dev;
-
-    dev_dbg(dev, "Waiting for device to enter PBL from: %s\n",
-        TO_MHI_EXEC_STR(mhi_cntrl->ee));
-
-    ret = wait_event_timeout(mhi_cntrl->state_event,
-                 MHI_IN_PBL(mhi_cntrl->ee) ||
-                 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
-                 msecs_to_jiffies(mhi_cntrl->timeout_ms));
-
-    if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+    if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
         dev_err(dev, "Device MHI is not in valid state\n");
         return;
     }
@@ -446,7 +440,12 @@ void mhi_fw_load_worker(struct work_struct *work)
     release_firmware(firmware);
 
     /* Error or in EDL mode, we're done */
-    if (ret || mhi_cntrl->ee == MHI_EE_EDL)
+    if (ret) {
+        dev_err(dev, "MHI did not load SBL, ret:%d\n", ret);
+        return;
+    }
+
+    if (mhi_cntrl->ee == MHI_EE_EDL)
         return;
 
     write_lock_irq(&mhi_cntrl->pm_lock);
@@ -474,8 +473,10 @@ fw_load_ee_pthru:
     if (!mhi_cntrl->fbc_download)
         return;
 
-    if (ret)
+    if (ret) {
+        dev_err(dev, "MHI did not enter READY state\n");
         goto error_read;
+    }
 
     /* Wait for the SBL event */
     ret = wait_event_timeout(mhi_cntrl->state_event,
@@ -493,6 +494,8 @@ fw_load_ee_pthru:
     ret = mhi_fw_load_amss(mhi_cntrl,
                    /* Vector table is the last entry */
                    &image_info->mhi_buf[image_info->entries - 1]);
+    if (ret)
+        dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);
 
     release_firmware(firmware);
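The mhi_fw_load_amss() rework above separates two failure modes that the old code folded together: hard errors (PM error state, or a status other than XFER_COMPL) are checked first and return -EIO, and only then does a zero return from wait_event_timeout() map to -ETIMEDOUT. A minimal userspace sketch of that ordering; finish_transfer(), its arguments, and XFER_COMPL are hypothetical stand-ins, not MHI API:

    /* Hard failures beat a bare timeout; a zero "wait" result means
     * the wait expired with time remaining == 0, i.e. timed out. */
    #include <stdio.h>
    #include <errno.h>

    enum { XFER_COMPL = 2 };

    static int finish_transfer(long wait_ret, int dev_error, int tx_status)
    {
        if (dev_error || tx_status != XFER_COMPL)
            return -EIO;                  /* device/status error first */
        return wait_ret ? 0 : -ETIMEDOUT; /* then map timeout */
    }

    int main(void)
    {
        printf("%d\n", finish_transfer(0, 0, XFER_COMPL)); /* -ETIMEDOUT */
        printf("%d\n", finish_transfer(5, 0, XFER_COMPL)); /* 0: success */
        printf("%d\n", finish_transfer(5, 1, XFER_COMPL)); /* -EIO */
        return 0;
    }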
diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
index 1f8c82603179..e43a190a7a36 100644
--- a/drivers/bus/mhi/core/init.c
+++ b/drivers/bus/mhi/core/init.c
@@ -34,6 +34,8 @@ const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
     [DEV_ST_TRANSITION_READY] = "READY",
     [DEV_ST_TRANSITION_SBL] = "SBL",
     [DEV_ST_TRANSITION_MISSION_MODE] = "MISSION_MODE",
+    [DEV_ST_TRANSITION_SYS_ERR] = "SYS_ERR",
+    [DEV_ST_TRANSITION_DISABLE] = "DISABLE",
 };
 
 const char * const mhi_state_str[MHI_STATE_MAX] = {
@@ -835,8 +837,6 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
     spin_lock_init(&mhi_cntrl->transition_lock);
     spin_lock_init(&mhi_cntrl->wlock);
     INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
-    INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker);
-    INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker);
     init_waitqueue_head(&mhi_cntrl->state_event);
 
     mhi_cmd = mhi_cntrl->mhi_cmd;
@@ -864,6 +864,10 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
         mutex_init(&mhi_chan->mutex);
         init_completion(&mhi_chan->completion);
         rwlock_init(&mhi_chan->lock);
+
+        /* used in setting bei field of TRE */
+        mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
+        mhi_chan->intmod = mhi_event->intmod;
     }
 
     if (mhi_cntrl->bounce_buf) {
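The init.c hunk above drops the dedicated syserr_worker and fw_worker; later hunks in pm.c route both jobs through the one remaining st_worker as queued state transitions (SYS_ERR, DISABLE). A rough plain-C sketch of that single-worker direction, assuming a trivial ring of queued transitions; all names here are hypothetical, not the kernel's llist/workqueue machinery:

    /* One worker drains every queued transition in order, instead of
     * one work item per event type. */
    #include <stdio.h>

    enum transition { T_PBL, T_READY, T_SYS_ERR, T_DISABLE };

    static enum transition queue[8];
    static int head, tail;

    static void queue_transition(enum transition t)
    {
        queue[tail++ % 8] = t;   /* "schedule" the transition */
    }

    static void st_worker(void)
    {
        while (head != tail)
            printf("processing transition %d\n", queue[head++ % 8]);
    }

    int main(void)
    {
        queue_transition(T_SYS_ERR);
        queue_transition(T_DISABLE);
        st_worker();             /* single drain point */
        return 0;
    }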
diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h
index 095d95bc0e37..b1f640b75a94 100644
--- a/drivers/bus/mhi/core/internal.h
+++ b/drivers/bus/mhi/core/internal.h
@@ -386,6 +386,8 @@ enum dev_st_transition {
     DEV_ST_TRANSITION_READY,
     DEV_ST_TRANSITION_SBL,
     DEV_ST_TRANSITION_MISSION_MODE,
+    DEV_ST_TRANSITION_SYS_ERR,
+    DEV_ST_TRANSITION_DISABLE,
     DEV_ST_TRANSITION_MAX,
 };
 
@@ -452,6 +454,7 @@ enum mhi_pm_state {
 #define PRIMARY_CMD_RING        0
 #define MHI_DEV_WAKE_DB         127
 #define MHI_MAX_MTU             0xffff
+#define MHI_RANDOM_U32_NONZERO(bmsk)    (prandom_u32_max(bmsk) + 1)
 
 enum mhi_er_type {
     MHI_ER_TYPE_INVALID = 0x0,
@@ -586,7 +589,7 @@ enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl);
 int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
                    enum dev_st_transition state);
 void mhi_pm_st_worker(struct work_struct *work);
-void mhi_pm_sys_err_worker(struct work_struct *work);
+void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl);
 void mhi_fw_load_worker(struct work_struct *work);
 int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
 void mhi_ctrl_ev_task(unsigned long data);
@@ -627,6 +630,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
 void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
 void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
               struct image_info *img_info);
+void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
             struct mhi_chan *mhi_chan);
 int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
@@ -670,8 +674,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev);
 irqreturn_t mhi_intvec_handler(int irq_number, void *dev);
 
 int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
-        void *buf, void *cb, size_t buf_len, enum mhi_flags flags);
-
+        struct mhi_buf_info *info, enum mhi_flags flags);
 int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
              struct mhi_buf_info *buf_info);
 int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
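internal.h now defines MHI_RANDOM_U32_NONZERO() on top of prandom_u32_max(), which returns a value in [0, bmsk), so adding 1 yields [1, bmsk]: never zero and always within the register mask. The old prandom_u32() & bmsk could produce 0, which is why boot.c needed the special case deleted above. A userspace check of the arithmetic; rand_below() is a stand-in for the kernel's prandom_u32_max():

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define SEQNUM_BMSK 0x3fffffffu

    /* stand-in: uniform-ish value in [0, ceil) */
    static uint32_t rand_below(uint32_t ceil) { return (uint32_t)rand() % ceil; }

    #define RANDOM_U32_NONZERO(bmsk) (rand_below(bmsk) + 1)

    int main(void)
    {
        for (int i = 0; i < 100000; i++) {
            uint32_t id = RANDOM_U32_NONZERO(SEQNUM_BMSK);
            assert(id != 0 && id <= SEQNUM_BMSK); /* nonzero, fits mask */
        }
        return 0;
    }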
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index 97e06cc586e4..1f622ce6be8b 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -258,7 +258,7 @@ int mhi_destroy_device(struct device *dev, void *data)
     return 0;
 }
 
-static void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
+void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
 {
     struct mhi_driver *mhi_drv;
 
@@ -270,6 +270,7 @@ static void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
     if (mhi_drv->status_cb)
         mhi_drv->status_cb(mhi_dev, cb_reason);
 }
+EXPORT_SYMBOL_GPL(mhi_notify);
 
 /* Bind MHI channels to MHI devices */
 void mhi_create_devices(struct mhi_controller *mhi_cntrl)
@@ -368,30 +369,37 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev)
     return IRQ_HANDLED;
 }
 
-irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev)
+irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
 {
-    struct mhi_controller *mhi_cntrl = dev;
+    struct mhi_controller *mhi_cntrl = priv;
+    struct device *dev = &mhi_cntrl->mhi_dev->dev;
     enum mhi_state state = MHI_STATE_MAX;
     enum mhi_pm_state pm_state = 0;
     enum mhi_ee_type ee = 0;
 
     write_lock_irq(&mhi_cntrl->pm_lock);
-    if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
-        state = mhi_get_mhi_state(mhi_cntrl);
-        ee = mhi_cntrl->ee;
-        mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+    if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+        write_unlock_irq(&mhi_cntrl->pm_lock);
+        goto exit_intvec;
     }
 
+    state = mhi_get_mhi_state(mhi_cntrl);
+    ee = mhi_cntrl->ee;
+    mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+    dev_dbg(dev, "local ee:%s device ee:%s dev_state:%s\n",
+        TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee),
+        TO_MHI_STATE_STR(state));
+
     if (state == MHI_STATE_SYS_ERR) {
-        dev_dbg(&mhi_cntrl->mhi_dev->dev, "System error detected\n");
+        dev_dbg(dev, "System error detected\n");
         pm_state = mhi_tryset_pm_state(mhi_cntrl,
                            MHI_PM_SYS_ERR_DETECT);
     }
     write_unlock_irq(&mhi_cntrl->pm_lock);
 
-    /* If device in RDDM don't bother processing SYS error */
-    if (mhi_cntrl->ee == MHI_EE_RDDM) {
-        if (mhi_cntrl->ee != ee) {
+    /* If device supports RDDM don't bother processing SYS error */
+    if (mhi_cntrl->rddm_image) {
+        if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) {
             mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
             wake_up_all(&mhi_cntrl->state_event);
         }
@@ -405,7 +413,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev)
         if (MHI_IN_PBL(ee))
             mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
         else
-            schedule_work(&mhi_cntrl->syserr_worker);
+            mhi_pm_sys_err_handler(mhi_cntrl);
     }
 
 exit_intvec:
@@ -513,7 +521,10 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
             mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
 
             result.buf_addr = buf_info->cb_buf;
-            result.bytes_xferd = xfer_len;
+
+            /* truncate to buf len if xfer_len is larger */
+            result.bytes_xferd =
+                min_t(u16, xfer_len, buf_info->len);
             mhi_del_ring_element(mhi_cntrl, buf_ring);
             mhi_del_ring_element(mhi_cntrl, tre_ring);
             local_rp = tre_ring->rp;
@@ -597,7 +608,9 @@ static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
     result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
         -EOVERFLOW : 0;
-    result.bytes_xferd = xfer_len;
+
+    /* truncate to buf len if xfer_len is larger */
+    result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
     result.buf_addr = buf_info->cb_buf;
     result.dir = mhi_chan->dir;
 
@@ -722,13 +735,18 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
         {
             enum mhi_pm_state new_state;
 
+            /* skip SYS_ERROR handling if RDDM supported */
+            if (mhi_cntrl->ee == MHI_EE_RDDM ||
+                mhi_cntrl->rddm_image)
+                break;
+
             dev_dbg(dev, "System error detected\n");
             write_lock_irq(&mhi_cntrl->pm_lock);
             new_state = mhi_tryset_pm_state(mhi_cntrl,
                             MHI_PM_SYS_ERR_DETECT);
             write_unlock_irq(&mhi_cntrl->pm_lock);
             if (new_state == MHI_PM_SYS_ERR_DETECT)
-                schedule_work(&mhi_cntrl->syserr_worker);
+                mhi_pm_sys_err_handler(mhi_cntrl);
             break;
         }
         default:
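parse_xfer_event() and parse_rsc_event() above stop trusting the device-reported transfer length outright and clamp it to the size of the buffer the host actually posted, via min_t(u16, ...). The same clamp as a self-contained C snippet; the values are made up and min_u16() mirrors min_t(u16, ...):

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t min_u16(uint16_t a, uint16_t b) { return a < b ? a : b; }

    int main(void)
    {
        uint16_t posted_len = 512;   /* buffer length the host queued */
        uint16_t xfer_len   = 4096;  /* length claimed by the device */

        /* never report more bytes than the buffer can hold */
        printf("bytes_xferd = %u\n", min_u16(xfer_len, posted_len)); /* 512 */
        return 0;
    }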
@@ -774,9 +792,18 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
         }
         case MHI_PKT_TYPE_TX_EVENT:
             chan = MHI_TRE_GET_EV_CHID(local_rp);
-            mhi_chan = &mhi_cntrl->mhi_chan[chan];
-            parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
-            event_quota--;
+
+            WARN_ON(chan >= mhi_cntrl->max_chan);
+
+            /*
+             * Only process the event ring elements whose channel
+             * ID is within the maximum supported range.
+             */
+            if (chan < mhi_cntrl->max_chan) {
+                mhi_chan = &mhi_cntrl->mhi_chan[chan];
+                parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+                event_quota--;
+            }
             break;
         default:
             dev_err(dev, "Unhandled event type: %d\n", type);
@@ -819,14 +846,23 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
         enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
 
         chan = MHI_TRE_GET_EV_CHID(local_rp);
-        mhi_chan = &mhi_cntrl->mhi_chan[chan];
-
-        if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
-            parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
-            event_quota--;
-        } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
-            parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
-            event_quota--;
+
+        WARN_ON(chan >= mhi_cntrl->max_chan);
+
+        /*
+         * Only process the event ring elements whose channel
+         * ID is within the maximum supported range.
+         */
+        if (chan < mhi_cntrl->max_chan) {
+            mhi_chan = &mhi_cntrl->mhi_chan[chan];
+
+            if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
+                parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+                event_quota--;
+            } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
+                parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
+                event_quota--;
+            }
         }
 
         mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
@@ -896,7 +932,7 @@ void mhi_ctrl_ev_task(unsigned long data)
         }
         write_unlock_irq(&mhi_cntrl->pm_lock);
         if (pm_state == MHI_PM_SYS_ERR_DETECT)
-            schedule_work(&mhi_cntrl->syserr_worker);
+            mhi_pm_sys_err_handler(mhi_cntrl);
     }
 }
 
@@ -918,9 +954,7 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
     struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
                                  mhi_dev->dl_chan;
     struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
-    struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
-    struct mhi_buf_info *buf_info;
-    struct mhi_tre *mhi_tre;
+    struct mhi_buf_info buf_info = { };
     int ret;
 
     /* If MHI host pre-allocates buffers then client drivers cannot queue */
@@ -945,27 +979,15 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
     /* Toggle wake to exit out of M2 */
     mhi_cntrl->wake_toggle(mhi_cntrl);
 
-    /* Generate the TRE */
-    buf_info = buf_ring->wp;
+    buf_info.v_addr = skb->data;
+    buf_info.cb_buf = skb;
+    buf_info.len = len;
 
-    buf_info->v_addr = skb->data;
-    buf_info->cb_buf = skb;
-    buf_info->wp = tre_ring->wp;
-    buf_info->dir = mhi_chan->dir;
-    buf_info->len = len;
-    ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
-    if (ret)
-        goto map_error;
-
-    mhi_tre = tre_ring->wp;
-
-    mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
-    mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
-    mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
-
-    /* increment WP */
-    mhi_add_ring_element(mhi_cntrl, tre_ring);
-    mhi_add_ring_element(mhi_cntrl, buf_ring);
+    ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
+    if (unlikely(ret)) {
+        read_unlock_bh(&mhi_cntrl->pm_lock);
+        return ret;
+    }
 
     if (mhi_chan->dir == DMA_TO_DEVICE)
         atomic_inc(&mhi_cntrl->pending_pkts);
@@ -979,11 +1001,6 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
     read_unlock_bh(&mhi_cntrl->pm_lock);
 
     return 0;
-
-map_error:
-    read_unlock_bh(&mhi_cntrl->pm_lock);
-
-    return ret;
 }
 EXPORT_SYMBOL_GPL(mhi_queue_skb);
 
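Both event-ring loops above gain the same defensive pattern: WARN_ON() a channel ID from the device that exceeds max_chan, then refuse to use it as an array index anyway. The warning surfaces misbehaving firmware without letting it walk past the channel array. The pattern in isolation; warn_on() and MAX_CHAN are userspace stand-ins:

    #include <stdio.h>

    #define MAX_CHAN 128

    static int chan_data[MAX_CHAN];

    static void warn_on(int cond)
    {
        if (cond)
            fprintf(stderr, "WARN: bogus channel ID\n");
    }

    static void process_event(unsigned int chan)
    {
        warn_on(chan >= MAX_CHAN);

        /* only touch channels inside the supported range */
        if (chan < MAX_CHAN)
            chan_data[chan]++;
    }

    int main(void)
    {
        process_event(5);     /* normal path */
        process_event(4096);  /* warns, but never indexes out of bounds */
        return 0;
    }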
@@ -995,9 +1012,8 @@ int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
                                  mhi_dev->dl_chan;
     struct device *dev = &mhi_cntrl->mhi_dev->dev;
     struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
-    struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
-    struct mhi_buf_info *buf_info;
-    struct mhi_tre *mhi_tre;
+    struct mhi_buf_info buf_info = { };
+    int ret;
 
     /* If MHI host pre-allocates buffers then client drivers cannot queue */
     if (mhi_chan->pre_alloc)
@@ -1024,25 +1040,16 @@ int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
     /* Toggle wake to exit out of M2 */
     mhi_cntrl->wake_toggle(mhi_cntrl);
 
-    /* Generate the TRE */
-    buf_info = buf_ring->wp;
-    WARN_ON(buf_info->used);
-    buf_info->p_addr = mhi_buf->dma_addr;
-    buf_info->pre_mapped = true;
-    buf_info->cb_buf = mhi_buf;
-    buf_info->wp = tre_ring->wp;
-    buf_info->dir = mhi_chan->dir;
-    buf_info->len = len;
-
-    mhi_tre = tre_ring->wp;
-
-    mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
-    mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
-    mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
+    buf_info.p_addr = mhi_buf->dma_addr;
+    buf_info.cb_buf = mhi_buf;
+    buf_info.pre_mapped = true;
+    buf_info.len = len;
 
-    /* increment WP */
-    mhi_add_ring_element(mhi_cntrl, tre_ring);
-    mhi_add_ring_element(mhi_cntrl, buf_ring);
+    ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
+    if (unlikely(ret)) {
+        read_unlock_bh(&mhi_cntrl->pm_lock);
+        return ret;
+    }
 
     if (mhi_chan->dir == DMA_TO_DEVICE)
         atomic_inc(&mhi_cntrl->pending_pkts);
@@ -1060,7 +1067,7 @@ int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
 EXPORT_SYMBOL_GPL(mhi_queue_dma);
 
 int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
-        void *buf, void *cb, size_t buf_len, enum mhi_flags flags)
+        struct mhi_buf_info *info, enum mhi_flags flags)
 {
     struct mhi_ring *buf_ring, *tre_ring;
     struct mhi_tre *mhi_tre;
@@ -1072,15 +1079,22 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
     tre_ring = &mhi_chan->tre_ring;
 
     buf_info = buf_ring->wp;
-    buf_info->v_addr = buf;
-    buf_info->cb_buf = cb;
+    WARN_ON(buf_info->used);
+    buf_info->pre_mapped = info->pre_mapped;
+    if (info->pre_mapped)
+        buf_info->p_addr = info->p_addr;
+    else
+        buf_info->v_addr = info->v_addr;
+    buf_info->cb_buf = info->cb_buf;
     buf_info->wp = tre_ring->wp;
     buf_info->dir = mhi_chan->dir;
-    buf_info->len = buf_len;
+    buf_info->len = info->len;
 
-    ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
-    if (ret)
-        return ret;
+    if (!info->pre_mapped) {
+        ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
+        if (ret)
+            return ret;
+    }
 
     eob = !!(flags & MHI_EOB);
     eot = !!(flags & MHI_EOT);
@@ -1089,7 +1103,7 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
 
     mhi_tre = tre_ring->wp;
     mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
-    mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_len);
+    mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
     mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
 
     /* increment WP */
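After this refactor, mhi_gen_tre() is the single place that maps buffers and writes the TRE; mhi_queue_skb(), mhi_queue_dma() and mhi_queue_buf() just fill a zero-initialized struct mhi_buf_info on the stack, and pre-mapped DMA buffers skip the map_single() step. A stripped-down sketch of that calling convention; the struct and gen_tre() here are simplified stand-ins, not the kernel types:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct buf_info {
        void  *v_addr;      /* virtual address to map (if !pre_mapped) */
        size_t len;
        bool   pre_mapped;  /* caller already did the DMA mapping */
    };

    /* single choke point: validate, (maybe) map, write the descriptor */
    static int gen_tre(const struct buf_info *info)
    {
        if (!info->len)
            return -1;
        printf("TRE for %zu bytes (%smapped by caller)\n",
               info->len, info->pre_mapped ? "" : "not ");
        return 0;
    }

    int main(void)
    {
        static char payload[8];
        struct buf_info info = { 0 };  /* zero-init, like = { } above */

        info.v_addr = payload;
        info.len = sizeof(payload);
        return gen_tre(&info);
    }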
@@ -1106,6 +1120,7 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
     struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
                                  mhi_dev->dl_chan;
     struct mhi_ring *tre_ring;
+    struct mhi_buf_info buf_info = { };
     unsigned long flags;
     int ret;
 
@@ -1121,7 +1136,11 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
     if (mhi_is_ring_full(mhi_cntrl, tre_ring))
         return -ENOMEM;
 
-    ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf, len, mflags);
+    buf_info.v_addr = buf;
+    buf_info.cb_buf = buf;
+    buf_info.len = len;
+
+    ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
     if (unlikely(ret))
         return ret;
 
@@ -1322,7 +1341,7 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
 
         while (nr_el--) {
             void *buf;
-
+            struct mhi_buf_info info = { };
             buf = kmalloc(len, GFP_KERNEL);
             if (!buf) {
                 ret = -ENOMEM;
@@ -1330,8 +1349,10 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
             }
 
             /* Prepare transfer descriptors */
-            ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf,
-                      len, MHI_EOT);
+            info.v_addr = buf;
+            info.cb_buf = buf;
+            info.len = len;
+            ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
             if (ret) {
                 kfree(buf);
                 goto error_pre_alloc;
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
index dc83d65f7784..796098078083 100644
--- a/drivers/bus/mhi/core/pm.c
+++ b/drivers/bus/mhi/core/pm.c
@@ -288,14 +288,18 @@ int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
     for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
         struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
 
-        write_lock_irq(&mhi_chan->lock);
-        if (mhi_chan->db_cfg.reset_req)
+        if (mhi_chan->db_cfg.reset_req) {
+            write_lock_irq(&mhi_chan->lock);
             mhi_chan->db_cfg.db_mode = true;
+            write_unlock_irq(&mhi_chan->lock);
+        }
+
+        read_lock_irq(&mhi_chan->lock);
 
         /* Only ring DB if ring is not empty */
         if (tre_ring->base && tre_ring->wp != tre_ring->rp)
             mhi_ring_chan_db(mhi_cntrl, mhi_chan);
-        write_unlock_irq(&mhi_chan->lock);
+        read_unlock_irq(&mhi_chan->lock);
     }
 
     mhi_cntrl->wake_put(mhi_cntrl, false);
@@ -449,19 +453,8 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
         to_mhi_pm_state_str(transition_state));
 
     /* We must notify MHI control driver so it can clean up first */
-    if (transition_state == MHI_PM_SYS_ERR_PROCESS) {
-        /*
-         * If controller supports RDDM, we do not process
-         * SYS error state, instead we will jump directly
-         * to RDDM state
-         */
-        if (mhi_cntrl->rddm_image) {
-            dev_dbg(dev,
-                "Controller supports RDDM, so skip SYS_ERR\n");
-            return;
-        }
+    if (transition_state == MHI_PM_SYS_ERR_PROCESS)
         mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);
-    }
 
     mutex_lock(&mhi_cntrl->pm_mutex);
     write_lock_irq(&mhi_cntrl->pm_lock);
@@ -527,8 +520,6 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
     mutex_unlock(&mhi_cntrl->pm_mutex);
     dev_dbg(dev, "Waiting for all pending threads to complete\n");
     wake_up_all(&mhi_cntrl->state_event);
-    flush_work(&mhi_cntrl->st_worker);
-    flush_work(&mhi_cntrl->fw_worker);
 
     dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
     device_for_each_child(mhi_cntrl->cntrl_dev, NULL, mhi_destroy_device);
@@ -608,13 +599,17 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
 }
 
 /* SYS_ERR worker */
-void mhi_pm_sys_err_worker(struct work_struct *work)
+void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
 {
-    struct mhi_controller *mhi_cntrl = container_of(work,
-                            struct mhi_controller,
-                            syserr_worker);
+    struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+    /* skip if controller supports RDDM */
+    if (mhi_cntrl->rddm_image) {
+        dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
+        return;
+    }
 
-    mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
+    mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
 }
 
 /* Device State Transition worker */
@@ -643,7 +638,7 @@ void mhi_pm_st_worker(struct work_struct *work)
             mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
             write_unlock_irq(&mhi_cntrl->pm_lock);
             if (MHI_IN_PBL(mhi_cntrl->ee))
-                wake_up_all(&mhi_cntrl->state_event);
+                mhi_fw_load_handler(mhi_cntrl);
             break;
         case DEV_ST_TRANSITION_SBL:
             write_lock_irq(&mhi_cntrl->pm_lock);
@@ -662,6 +657,14 @@ void mhi_pm_st_worker(struct work_struct *work)
         case DEV_ST_TRANSITION_READY:
             mhi_ready_state_transition(mhi_cntrl);
             break;
+        case DEV_ST_TRANSITION_SYS_ERR:
+            mhi_pm_disable_transition
+                (mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
+            break;
+        case DEV_ST_TRANSITION_DISABLE:
+            mhi_pm_disable_transition
+                (mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
+            break;
         default:
             break;
         }
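The mhi_pm_m0_transition() hunk above narrows the channel lock scope: mutating db_cfg.db_mode takes the write lock briefly, while the doorbell-ring path, which only reads channel state, downgrades to a read lock so rings on independent channels can proceed concurrently. The same idea with a POSIX rwlock; names here are hypothetical, and the sketch needs -lpthread:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_rwlock_t chan_lock = PTHREAD_RWLOCK_INITIALIZER;
    static bool db_mode;

    static void set_db_mode(void)
    {
        pthread_rwlock_wrlock(&chan_lock);  /* exclusive: mutates state */
        db_mode = true;
        pthread_rwlock_unlock(&chan_lock);
    }

    static void ring_db(void)
    {
        pthread_rwlock_rdlock(&chan_lock);  /* shared: read-only path */
        printf("ring doorbell, db_mode=%d\n", db_mode);
        pthread_rwlock_unlock(&chan_lock);
    }

    int main(void)
    {
        set_db_mode();
        ring_db();
        return 0;
    }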
@@ -669,6 +672,149 @@ void mhi_pm_st_worker(struct work_struct *work)
     }
 }
 
+int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
+{
+    struct mhi_chan *itr, *tmp;
+    struct device *dev = &mhi_cntrl->mhi_dev->dev;
+    enum mhi_pm_state new_state;
+    int ret;
+
+    if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+        return -EINVAL;
+
+    if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+        return -EIO;
+
+    /* Return busy if there are any pending resources */
+    if (atomic_read(&mhi_cntrl->dev_wake))
+        return -EBUSY;
+
+    /* Take MHI out of M2 state */
+    read_lock_bh(&mhi_cntrl->pm_lock);
+    mhi_cntrl->wake_get(mhi_cntrl, false);
+    read_unlock_bh(&mhi_cntrl->pm_lock);
+
+    ret = wait_event_timeout(mhi_cntrl->state_event,
+                 mhi_cntrl->dev_state == MHI_STATE_M0 ||
+                 mhi_cntrl->dev_state == MHI_STATE_M1 ||
+                 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+                 msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+    read_lock_bh(&mhi_cntrl->pm_lock);
+    mhi_cntrl->wake_put(mhi_cntrl, false);
+    read_unlock_bh(&mhi_cntrl->pm_lock);
+
+    if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+        dev_err(dev,
+            "Could not enter M0/M1 state");
+        return -EIO;
+    }
+
+    write_lock_irq(&mhi_cntrl->pm_lock);
+
+    if (atomic_read(&mhi_cntrl->dev_wake)) {
+        write_unlock_irq(&mhi_cntrl->pm_lock);
+        return -EBUSY;
+    }
+
+    dev_info(dev, "Allowing M3 transition\n");
+    new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
+    if (new_state != MHI_PM_M3_ENTER) {
+        write_unlock_irq(&mhi_cntrl->pm_lock);
+        dev_err(dev,
+            "Error setting to PM state: %s from: %s\n",
+            to_mhi_pm_state_str(MHI_PM_M3_ENTER),
+            to_mhi_pm_state_str(mhi_cntrl->pm_state));
+        return -EIO;
+    }
+
+    /* Set MHI to M3 and wait for completion */
+    mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+    write_unlock_irq(&mhi_cntrl->pm_lock);
+    dev_info(dev, "Wait for M3 completion\n");
+
+    ret = wait_event_timeout(mhi_cntrl->state_event,
+                 mhi_cntrl->dev_state == MHI_STATE_M3 ||
+                 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+                 msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+    if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+        dev_err(dev,
+            "Did not enter M3 state, MHI state: %s, PM state: %s\n",
+            TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+            to_mhi_pm_state_str(mhi_cntrl->pm_state));
+        return -EIO;
+    }
+
+    /* Notify clients about entering LPM */
+    list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+        mutex_lock(&itr->mutex);
+        if (itr->mhi_dev)
+            mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
+        mutex_unlock(&itr->mutex);
+    }
+
+    return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_pm_suspend);
+
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+{
+    struct mhi_chan *itr, *tmp;
+    struct device *dev = &mhi_cntrl->mhi_dev->dev;
+    enum mhi_pm_state cur_state;
+    int ret;
+
+    dev_info(dev, "Entered with PM state: %s, MHI state: %s\n",
+         to_mhi_pm_state_str(mhi_cntrl->pm_state),
+         TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+    if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+        return 0;
+
+    if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+        return -EIO;
+
+    /* Notify clients about exiting LPM */
+    list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+        mutex_lock(&itr->mutex);
+        if (itr->mhi_dev)
+            mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
+        mutex_unlock(&itr->mutex);
+    }
+
+    write_lock_irq(&mhi_cntrl->pm_lock);
+    cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
+    if (cur_state != MHI_PM_M3_EXIT) {
+        write_unlock_irq(&mhi_cntrl->pm_lock);
+        dev_info(dev,
+             "Error setting to PM state: %s from: %s\n",
+             to_mhi_pm_state_str(MHI_PM_M3_EXIT),
+             to_mhi_pm_state_str(mhi_cntrl->pm_state));
+        return -EIO;
+    }
+
+    /* Set MHI to M0 and wait for completion */
+    mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+    write_unlock_irq(&mhi_cntrl->pm_lock);
+
+    ret = wait_event_timeout(mhi_cntrl->state_event,
+                 mhi_cntrl->dev_state == MHI_STATE_M0 ||
+                 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+                 msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+    if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+        dev_err(dev,
+            "Did not enter M0 state, MHI state: %s, PM state: %s\n",
+            TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+            to_mhi_pm_state_str(mhi_cntrl->pm_state));
+        return -EIO;
+    }
+
+    return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_pm_resume);
+
 int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
 {
     int ret;
@@ -760,6 +906,7 @@ static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
 
 int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
 {
+    enum mhi_state state;
     enum mhi_ee_type current_ee;
     enum dev_st_transition next_state;
     struct device *dev = &mhi_cntrl->mhi_dev->dev;
@@ -829,13 +976,36 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
         goto error_bhi_offset;
     }
 
+    state = mhi_get_mhi_state(mhi_cntrl);
+    if (state == MHI_STATE_SYS_ERR) {
+        mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+        ret = wait_event_timeout(mhi_cntrl->state_event,
+                MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
+                    mhi_read_reg_field(mhi_cntrl,
+                               mhi_cntrl->regs,
+                               MHICTRL,
+                               MHICTRL_RESET_MASK,
+                               MHICTRL_RESET_SHIFT,
+                               &val) ||
+                    !val,
+                msecs_to_jiffies(mhi_cntrl->timeout_ms));
+        if (ret) {
+            ret = -EIO;
+            dev_info(dev, "Failed to reset MHI due to syserr state\n");
+            goto error_bhi_offset;
+        }
+
+        /*
+         * device cleares INTVEC as part of RESET processing,
+         * re-program it
+         */
+        mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+    }
+
     /* Transition to next state */
     next_state = MHI_IN_PBL(current_ee) ?
         DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;
 
-    if (next_state == DEV_ST_TRANSITION_PBL)
-        schedule_work(&mhi_cntrl->fw_worker);
-
     mhi_queue_state_transition(mhi_cntrl, next_state);
 
     mutex_unlock(&mhi_cntrl->pm_mutex);
@@ -876,7 +1046,12 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
             to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
             to_mhi_pm_state_str(mhi_cntrl->pm_state));
     }
-    mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
+
+    mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);
+
+    /* Wait for shutdown to complete */
+    flush_work(&mhi_cntrl->st_worker);
+
     mhi_deinit_free_irq(mhi_cntrl);
 
     if (!mhi_cntrl->pre_init) {
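mhi_power_down() above no longer calls mhi_pm_disable_transition() directly: it queues DEV_ST_TRANSITION_DISABLE and then flush_work()s the state worker, which makes the asynchronous transition synchronous at the call site. A minimal pthread analogue of that queue-then-flush shape; the names are hypothetical and a thread stands in for the work item:

    #include <pthread.h>
    #include <stdio.h>

    static void *st_worker(void *arg)
    {
        (void)arg;
        printf("processing DISABLE transition\n");  /* the queued work */
        return NULL;
    }

    int main(void)
    {
        pthread_t worker;

        /* "queue" the transition */
        pthread_create(&worker, NULL, st_worker, NULL);

        /* "flush": wait for the worker before tearing anything down */
        pthread_join(worker, NULL);

        printf("power down complete\n");
        return 0;
    }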