Diffstat (limited to 'drivers/bus')
 drivers/bus/fsl-mc/dprc-driver.c      |  8
 drivers/bus/fsl-mc/fsl-mc-allocator.c |  9
 drivers/bus/fsl-mc/fsl-mc-msi.c       | 79
 drivers/bus/imx-weim.c                | 18
 drivers/bus/mhi/core/boot.c           |  2
 drivers/bus/mhi/core/init.c           |  4
 drivers/bus/mhi/core/internal.h       |  9
 drivers/bus/mhi/core/main.c           | 24
 drivers/bus/mhi/core/pm.c             | 60
 drivers/bus/mhi/pci_generic.c         | 58
 drivers/bus/sunxi-rsb.c               |  8
 drivers/bus/tegra-gmi.c               | 50
 12 files changed, 195 insertions(+), 134 deletions(-)
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index 315e830b6ecd..5e70f9775a0e 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -400,7 +400,7 @@ static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
 	struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
 	struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
 	struct fsl_mc_io *mc_io = mc_dev->mc_io;
-	struct msi_desc *msi_desc = mc_dev->irqs[0]->msi_desc;
+	int irq = mc_dev->irqs[0]->virq;

 	dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n",
 		irq_num, smp_processor_id());
@@ -409,7 +409,7 @@ static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
 		return IRQ_HANDLED;

 	mutex_lock(&mc_bus->scan_mutex);
-	if (!msi_desc || msi_desc->irq != (u32)irq_num)
+	if (irq != (u32)irq_num)
 		goto out;

 	status = 0;
@@ -521,7 +521,7 @@ static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
 	 * function that programs the MSI physically in the device
 	 */
 	error = devm_request_threaded_irq(&mc_dev->dev,
-					  irq->msi_desc->irq,
+					  irq->virq,
 					  dprc_irq0_handler,
 					  dprc_irq0_handler_thread,
 					  IRQF_NO_SUSPEND | IRQF_ONESHOT,
@@ -771,7 +771,7 @@ static void dprc_teardown_irq(struct fsl_mc_device *mc_dev)
 	(void)disable_dprc_irq(mc_dev);

-	devm_free_irq(&mc_dev->dev, irq->msi_desc->irq, &mc_dev->dev);
+	devm_free_irq(&mc_dev->dev, irq->virq, &mc_dev->dev);

 	fsl_mc_free_irqs(mc_dev);
 }
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
index 6c513556911e..dced427ca8ba 100644
--- a/drivers/bus/fsl-mc/fsl-mc-allocator.c
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -350,7 +350,6 @@ int fsl_mc_populate_irq_pool(struct fsl_mc_device *mc_bus_dev,
 			     unsigned int irq_count)
 {
 	unsigned int i;
-	struct msi_desc *msi_desc;
 	struct fsl_mc_device_irq *irq_resources;
 	struct fsl_mc_device_irq *mc_dev_irq;
 	int error;
@@ -388,16 +387,12 @@ int fsl_mc_populate_irq_pool(struct fsl_mc_device *mc_bus_dev,
 		mc_dev_irq->resource.type = res_pool->type;
 		mc_dev_irq->resource.data = mc_dev_irq;
 		mc_dev_irq->resource.parent_pool = res_pool;
+		mc_dev_irq->virq = msi_get_virq(&mc_bus_dev->dev, i);
+		mc_dev_irq->resource.id = mc_dev_irq->virq;
 		INIT_LIST_HEAD(&mc_dev_irq->resource.node);
 		list_add_tail(&mc_dev_irq->resource.node, &res_pool->free_list);
 	}

-	for_each_msi_entry(msi_desc, &mc_bus_dev->dev) {
-		mc_dev_irq = &irq_resources[msi_desc->fsl_mc.msi_index];
-		mc_dev_irq->msi_desc = msi_desc;
-		mc_dev_irq->resource.id = msi_desc->irq;
-	}
-
 	res_pool->max_count = irq_count;
 	res_pool->free_count = irq_count;
 	mc_bus->irq_resources = irq_resources;
diff --git a/drivers/bus/fsl-mc/fsl-mc-msi.c b/drivers/bus/fsl-mc/fsl-mc-msi.c
index cf974870ba55..5e0e4393ce4d 100644
--- a/drivers/bus/fsl-mc/fsl-mc-msi.c
+++ b/drivers/bus/fsl-mc/fsl-mc-msi.c
@@ -29,7 +29,7 @@ static irq_hw_number_t fsl_mc_domain_calc_hwirq(struct fsl_mc_device *dev,
 	 * Make the base hwirq value for ICID*10000 so it is readable
 	 * as a decimal value in /proc/interrupts.
 	 */
-	return (irq_hw_number_t)(desc->fsl_mc.msi_index + (dev->icid * 10000));
+	return (irq_hw_number_t)(desc->msi_index + (dev->icid * 10000));
 }

 static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg,
@@ -58,11 +58,11 @@ static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
 }

 static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
-				   struct fsl_mc_device_irq *mc_dev_irq)
+				   struct fsl_mc_device_irq *mc_dev_irq,
+				   struct msi_desc *msi_desc)
 {
 	int error;
 	struct fsl_mc_device *owner_mc_dev = mc_dev_irq->mc_dev;
-	struct msi_desc *msi_desc = mc_dev_irq->msi_desc;
 	struct dprc_irq_cfg irq_cfg;

 	/*
@@ -122,14 +122,14 @@ static void fsl_mc_msi_write_msg(struct irq_data *irq_data,
 	struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(msi_desc->dev);
 	struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
 	struct fsl_mc_device_irq *mc_dev_irq =
-		&mc_bus->irq_resources[msi_desc->fsl_mc.msi_index];
+		&mc_bus->irq_resources[msi_desc->msi_index];

 	msi_desc->msg = *msg;

 	/*
 	 * Program the MSI (paddr, value) pair in the device:
 	 */
-	__fsl_mc_msi_write_msg(mc_bus_dev, mc_dev_irq);
+	__fsl_mc_msi_write_msg(mc_bus_dev, mc_dev_irq, msi_desc);
 }

 static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
@@ -170,6 +170,7 @@ struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
 		fsl_mc_msi_update_dom_ops(info);
 	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
 		fsl_mc_msi_update_chip_ops(info);
+	info->flags |= MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS | MSI_FLAG_FREE_MSI_DESCS;

 	domain = msi_create_irq_domain(fwnode, info, parent);
 	if (domain)
@@ -210,61 +211,21 @@ struct irq_domain *fsl_mc_find_msi_domain(struct device *dev)
 	return msi_domain;
 }

-static void fsl_mc_msi_free_descs(struct device *dev)
-{
-	struct msi_desc *desc, *tmp;
-
-	list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
-		list_del(&desc->list);
-		free_msi_entry(desc);
-	}
-}
-
-static int fsl_mc_msi_alloc_descs(struct device *dev, unsigned int irq_count)
-
-{
-	unsigned int i;
-	int error;
-	struct msi_desc *msi_desc;
-
-	for (i = 0; i < irq_count; i++) {
-		msi_desc = alloc_msi_entry(dev, 1, NULL);
-		if (!msi_desc) {
-			dev_err(dev, "Failed to allocate msi entry\n");
-			error = -ENOMEM;
-			goto cleanup_msi_descs;
-		}
-
-		msi_desc->fsl_mc.msi_index = i;
-		INIT_LIST_HEAD(&msi_desc->list);
-		list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
-	}
-
-	return 0;
-
-cleanup_msi_descs:
-	fsl_mc_msi_free_descs(dev);
-	return error;
-}
-
-int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
-				 unsigned int irq_count)
+int fsl_mc_msi_domain_alloc_irqs(struct device *dev,  unsigned int irq_count)
 {
 	struct irq_domain *msi_domain;
 	int error;

-	if (!list_empty(dev_to_msi_list(dev)))
+	msi_domain = dev_get_msi_domain(dev);
+	if (!msi_domain)
 		return -EINVAL;

-	error = fsl_mc_msi_alloc_descs(dev, irq_count);
-	if (error < 0)
+	error = msi_setup_device_data(dev);
+	if (error)
 		return error;

-	msi_domain = dev_get_msi_domain(dev);
-	if (!msi_domain) {
-		error = -EINVAL;
-		goto cleanup_msi_descs;
-	}
+	if (msi_first_desc(dev, MSI_DESC_ALL))
+		return -EINVAL;

 	/*
 	 * NOTE: Calling this function will trigger the invocation of the
@@ -272,15 +233,8 @@ int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
 	 */
 	error = msi_domain_alloc_irqs(msi_domain, dev, irq_count);
-	if (error) {
+	if (error)
 		dev_err(dev, "Failed to allocate IRQs\n");
-		goto cleanup_msi_descs;
-	}
-
-	return 0;
-
-cleanup_msi_descs:
-	fsl_mc_msi_free_descs(dev);
 	return error;
 }
@@ -293,9 +247,4 @@ void fsl_mc_msi_domain_free_irqs(struct device *dev)
 		return;

 	msi_domain_free_irqs(msi_domain, dev);
-
-	if (list_empty(dev_to_msi_list(dev)))
-		return;
-
-	fsl_mc_msi_free_descs(dev);
 }
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 28bb65a5613f..bccb275b65ba 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -21,6 +21,7 @@ struct imx_weim_devtype {
 	unsigned int	cs_stride;
 	unsigned int	wcr_offset;
 	unsigned int	wcr_bcm;
+	unsigned int	wcr_cont_bclk;
 };

 static const struct imx_weim_devtype imx1_weim_devtype = {
@@ -41,6 +42,7 @@ static const struct imx_weim_devtype imx50_weim_devtype = {
 	.cs_stride	= 0x18,
 	.wcr_offset	= 0x90,
 	.wcr_bcm	= BIT(0),
+	.wcr_cont_bclk	= BIT(3),
 };

 static const struct imx_weim_devtype imx51_weim_devtype = {
@@ -206,8 +208,20 @@ static int weim_parse_dt(struct platform_device *pdev, void __iomem *base)
 	if (of_property_read_bool(pdev->dev.of_node, "fsl,burst-clk-enable")) {
 		if (devtype->wcr_bcm) {
 			reg = readl(base + devtype->wcr_offset);
-			writel(reg | devtype->wcr_bcm,
-				base + devtype->wcr_offset);
+			reg |= devtype->wcr_bcm;
+
+			if (of_property_read_bool(pdev->dev.of_node,
+						"fsl,continuous-burst-clk")) {
+				if (devtype->wcr_cont_bclk) {
+					reg |= devtype->wcr_cont_bclk;
+				} else {
+					dev_err(&pdev->dev,
+						"continuous burst clk not supported.\n");
+					return -EINVAL;
+				}
+			}
+
+			writel(reg, base + devtype->wcr_offset);
 		} else {
 			dev_err(&pdev->dev, "burst clk mode not supported.\n");
 			return -EINVAL;
diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c
index 0a972620a403..74295d3cc662 100644
--- a/drivers/bus/mhi/core/boot.c
+++ b/drivers/bus/mhi/core/boot.c
@@ -417,7 +417,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
 	}

 	/* wait for ready on pass through or any other execution environment */
-	if (mhi_cntrl->ee != MHI_EE_EDL && mhi_cntrl->ee != MHI_EE_PBL)
+	if (!MHI_FW_LOAD_CAPABLE(mhi_cntrl->ee))
 		goto fw_load_ready_state;

 	fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
index 5aaca6d0f52b..046f407dc5d6 100644
--- a/drivers/bus/mhi/core/init.c
+++ b/drivers/bus/mhi/core/init.c
@@ -79,7 +79,8 @@ static const char * const mhi_pm_state_str[] = {
 const char *to_mhi_pm_state_str(enum mhi_pm_state state)
 {
-	int index = find_last_bit((unsigned long *)&state, 32);
+	unsigned long pm_state = state;
+	int index = find_last_bit(&pm_state, 32);

 	if (index >= ARRAY_SIZE(mhi_pm_state_str))
 		return "Invalid State";
@@ -788,6 +789,7 @@ static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
 		mhi_chan->offload_ch = ch_cfg->offload_channel;
 		mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
 		mhi_chan->pre_alloc = ch_cfg->auto_queue;
+		mhi_chan->wake_capable = ch_cfg->wake_capable;

 		/*
 		 * If MHI host allocates buffers, then the channel direction
diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h
index 3a732afaf73e..e2e10474a9d9 100644
--- a/drivers/bus/mhi/core/internal.h
+++ b/drivers/bus/mhi/core/internal.h
@@ -390,7 +390,8 @@ extern const char * const mhi_ee_str[MHI_EE_MAX];
 #define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \
 			ee == MHI_EE_EDL)
-
+#define MHI_POWER_UP_CAPABLE(ee) (MHI_IN_PBL(ee) || ee == MHI_EE_AMSS)
+#define MHI_FW_LOAD_CAPABLE(ee) (ee == MHI_EE_PBL || ee == MHI_EE_EDL)
 #define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW || \
 				 ee == MHI_EE_FP)
@@ -681,8 +682,12 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
 void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
 		      struct image_info *img_info);
 void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
+
+/* Automatically allocate and queue inbound buffers */
+#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0)
 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
-			struct mhi_chan *mhi_chan);
+			struct mhi_chan *mhi_chan, unsigned int flags);
+
 int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
 		       struct mhi_chan *mhi_chan);
 void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index b15c5bc37dd4..ffde617f93a3 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -1065,7 +1065,7 @@ void mhi_ctrl_ev_task(unsigned long data)
 		return;
 	}

-	/* Process ctrl events events */
+	/* Process ctrl events */
 	ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);

 	/*
@@ -1430,7 +1430,7 @@ exit_unprepare_channel:
 }

 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
-			struct mhi_chan *mhi_chan)
+			struct mhi_chan *mhi_chan, unsigned int flags)
 {
 	int ret = 0;
 	struct device *dev = &mhi_chan->mhi_dev->dev;
@@ -1455,6 +1455,9 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
 	if (ret)
 		goto error_pm_state;

+	if (mhi_chan->dir == DMA_FROM_DEVICE)
+		mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);
+
 	/* Pre-allocate buffer for xfer ring */
 	if (mhi_chan->pre_alloc) {
 		int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
@@ -1464,6 +1467,7 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
 		while (nr_el--) {
 			void *buf;
 			struct mhi_buf_info info = { };
+
 			buf = kmalloc(len, GFP_KERNEL);
 			if (!buf) {
 				ret = -ENOMEM;
@@ -1609,8 +1613,7 @@ void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
 	read_unlock_bh(&mhi_cntrl->pm_lock);
 }

-/* Move channel to start state */
-int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
+static int __mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
 {
 	int ret, dir;
 	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
@@ -1621,7 +1624,7 @@ int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
 		if (!mhi_chan)
 			continue;

-		ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
+		ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
 		if (ret)
 			goto error_open_chan;
 	}
@@ -1639,8 +1642,19 @@ error_open_chan:
 	return ret;
 }
+
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
+{
+	return __mhi_prepare_for_transfer(mhi_dev, 0);
+}
 EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);

+int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev)
+{
+	return __mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
+}
+EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer_autoqueue);
+
 void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
 {
 	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
index fb99e3727155..4aae0baea008 100644
--- a/drivers/bus/mhi/core/pm.c
+++ b/drivers/bus/mhi/core/pm.c
@@ -42,7 +42,7 @@
  * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
  *     LD_ERR_FATAL_DETECT -> DISABLE
  */
-static struct mhi_pm_transitions const dev_state_transitions[] = {
+static const struct mhi_pm_transitions dev_state_transitions[] = {
 	/* L0 States */
 	{
 		MHI_PM_DISABLE,
@@ -881,7 +881,7 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
 }
 EXPORT_SYMBOL_GPL(mhi_pm_suspend);

-int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force)
 {
 	struct mhi_chan *itr, *tmp;
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
@@ -898,8 +898,12 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
 	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
 		return -EIO;

-	if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3)
-		return -EINVAL;
+	if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) {
+		dev_warn(dev, "Resuming from non M3 state (%s)\n",
+			 TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)));
+		if (!force)
+			return -EINVAL;
+	}

 	/* Notify clients about exiting LPM */
 	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
@@ -940,8 +944,19 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
 	return 0;
 }
+
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+{
+	return __mhi_pm_resume(mhi_cntrl, false);
+}
 EXPORT_SYMBOL_GPL(mhi_pm_resume);

+int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl)
+{
+	return __mhi_pm_resume(mhi_cntrl, true);
+}
+EXPORT_SYMBOL_GPL(mhi_pm_resume_force);
+
 int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
 {
 	int ret;
@@ -1038,7 +1053,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
 	enum mhi_ee_type current_ee;
 	enum dev_st_transition next_state;
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
-	u32 val;
+	u32 interval_us = 25000; /* poll register field every 25 milliseconds */
 	int ret;

 	dev_info(dev, "Requested to power ON\n");
@@ -1055,10 +1070,6 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
 	mutex_lock(&mhi_cntrl->pm_mutex);
 	mhi_cntrl->pm_state = MHI_PM_DISABLE;

-	ret = mhi_init_irq_setup(mhi_cntrl);
-	if (ret)
-		goto error_setup_irq;
-
 	/* Setup BHI INTVEC */
 	write_lock_irq(&mhi_cntrl->pm_lock);
 	mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
@@ -1068,11 +1079,11 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
 	write_unlock_irq(&mhi_cntrl->pm_lock);

 	/* Confirm that the device is in valid exec env */
-	if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) {
+	if (!MHI_POWER_UP_CAPABLE(current_ee)) {
 		dev_err(dev, "%s is not a valid EE for power on\n",
 			TO_MHI_EXEC_STR(current_ee));
 		ret = -EIO;
-		goto error_async_power_up;
+		goto error_exit;
 	}

 	state = mhi_get_mhi_state(mhi_cntrl);
@@ -1081,20 +1092,12 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
 	if (state == MHI_STATE_SYS_ERR) {
 		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
-		ret = wait_event_timeout(mhi_cntrl->state_event,
-				MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
-					mhi_read_reg_field(mhi_cntrl,
-							   mhi_cntrl->regs,
-							   MHICTRL,
-							   MHICTRL_RESET_MASK,
-							   MHICTRL_RESET_SHIFT,
-							   &val) ||
-					!val,
-				msecs_to_jiffies(mhi_cntrl->timeout_ms));
-		if (!ret) {
-			ret = -EIO;
+		ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+				 MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0,
+				 interval_us);
+		if (ret) {
 			dev_info(dev, "Failed to reset MHI due to syserr state\n");
-			goto error_async_power_up;
+			goto error_exit;
 		}

 		/*
@@ -1104,6 +1107,10 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
 		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
 	}

+	ret = mhi_init_irq_setup(mhi_cntrl);
+	if (ret)
+		goto error_exit;
+
 	/* Transition to next state */
 	next_state = MHI_IN_PBL(current_ee) ?
 		DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;
@@ -1116,10 +1123,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
 	return 0;

-error_async_power_up:
-	mhi_deinit_free_irq(mhi_cntrl);
-
-error_setup_irq:
+error_exit:
 	mhi_cntrl->pm_state = MHI_PM_DISABLE;
 	mutex_unlock(&mhi_cntrl->pm_mutex);
diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
index 59a4896a8030..3a258a677df8 100644
--- a/drivers/bus/mhi/pci_generic.c
+++ b/drivers/bus/mhi/pci_generic.c
@@ -20,7 +20,7 @@
 #define MHI_PCI_DEFAULT_BAR_NUM 0

-#define MHI_POST_RESET_DELAY_MS 500
+#define MHI_POST_RESET_DELAY_MS 2000

 #define HEALTH_CHECK_PERIOD (HZ * 2)
@@ -403,7 +403,50 @@ static const struct mhi_pci_dev_info mhi_mv31_info = {
 	.dma_data_width = 32,
 };

+static const struct mhi_channel_config mhi_sierra_em919x_channels[] = {
+	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
+	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 256, 0),
+	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 0),
+	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 0),
+	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 128, 0),
+	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 128, 0),
+	MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
+	MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
+	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 512, 1),
+	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 512, 2),
+};
+
+static struct mhi_event_config modem_sierra_em919x_mhi_events[] = {
+	/* first ring is control+data and DIAG ring */
+	MHI_EVENT_CONFIG_CTRL(0, 2048),
+	/* Hardware channels request dedicated hardware event rings */
+	MHI_EVENT_CONFIG_HW_DATA(1, 2048, 100),
+	MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
+};
+
+static const struct mhi_controller_config modem_sierra_em919x_config = {
+	.max_channels = 128,
+	.timeout_ms = 24000,
+	.num_channels = ARRAY_SIZE(mhi_sierra_em919x_channels),
+	.ch_cfg = mhi_sierra_em919x_channels,
+	.num_events = ARRAY_SIZE(modem_sierra_em919x_mhi_events),
+	.event_cfg = modem_sierra_em919x_mhi_events,
+};
+
+static const struct mhi_pci_dev_info mhi_sierra_em919x_info = {
+	.name = "sierra-em919x",
+	.config = &modem_sierra_em919x_config,
+	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+	.dma_data_width = 32,
+	.sideband_wake = false,
+};
+
 static const struct pci_device_id mhi_pci_id_table[] = {
+	/* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200),
+		.driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
 		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
@@ -423,6 +466,9 @@ static const struct pci_device_id mhi_pci_id_table[] = {
 	/* DW5930e (sdx55), Non-eSIM, It's also T99W175 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1),
 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+	/* T99W175 (sdx55), Based on Qualcomm new baseline */
+	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf),
+		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
 	/* MV31-W (Cinterion) */
 	{ PCI_DEVICE(0x1269, 0x00b3),
 		.driver_data = (kernel_ulong_t) &mhi_mv31_info },
@@ -529,18 +575,12 @@ static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
 	mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num];
 	mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num);

-	err = pci_set_dma_mask(pdev, dma_mask);
+	err = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
 	if (err) {
 		dev_err(&pdev->dev, "Cannot set proper DMA mask\n");
 		return err;
 	}

-	err = pci_set_consistent_dma_mask(pdev, dma_mask);
-	if (err) {
-		dev_err(&pdev->dev, "set consistent dma mask failed\n");
-		return err;
-	}
-
 	pci_set_master(pdev);

 	return 0;
@@ -1018,7 +1058,7 @@ static int __maybe_unused mhi_pci_freeze(struct device *dev)
 	 * context.
 	 */
 	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
-		mhi_power_down(mhi_cntrl, false);
+		mhi_power_down(mhi_cntrl, true);
 		mhi_unprepare_after_power_down(mhi_cntrl);
 	}
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 6f225dddc74f..4566e730ef2b 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -687,11 +687,11 @@ err_clk_disable:
 static void sunxi_rsb_hw_exit(struct sunxi_rsb *rsb)
 {
-	/* Keep the clock and PM reference counts consistent. */
-	if (pm_runtime_status_suspended(rsb->dev))
-		pm_runtime_resume(rsb->dev);
 	reset_control_assert(rsb->rstc);
-	clk_disable_unprepare(rsb->clk);
+
+	/* Keep the clock and PM reference counts consistent. */
+	if (!pm_runtime_status_suspended(rsb->dev))
+		clk_disable_unprepare(rsb->clk);
 }

 static int __maybe_unused sunxi_rsb_runtime_suspend(struct device *dev)
diff --git a/drivers/bus/tegra-gmi.c b/drivers/bus/tegra-gmi.c
index a6570789f7af..35b59f92fa66 100644
--- a/drivers/bus/tegra-gmi.c
+++ b/drivers/bus/tegra-gmi.c
@@ -13,8 +13,11 @@
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/reset.h>

+#include <soc/tegra/common.h>
+
 #define TEGRA_GMI_CONFIG		0x00
 #define TEGRA_GMI_CONFIG_GO		BIT(31)
 #define TEGRA_GMI_BUS_WIDTH_32BIT	BIT(30)
@@ -54,9 +57,10 @@ static int tegra_gmi_enable(struct tegra_gmi *gmi)
 {
 	int err;

-	err = clk_prepare_enable(gmi->clk);
-	if (err < 0) {
-		dev_err(gmi->dev, "failed to enable clock: %d\n", err);
+	pm_runtime_enable(gmi->dev);
+	err = pm_runtime_resume_and_get(gmi->dev);
+	if (err) {
+		pm_runtime_disable(gmi->dev);
 		return err;
 	}
@@ -83,7 +87,9 @@ static void tegra_gmi_disable(struct tegra_gmi *gmi)
 	writel(config, gmi->base + TEGRA_GMI_CONFIG);

 	reset_control_assert(gmi->rst);
-	clk_disable_unprepare(gmi->clk);
+
+	pm_runtime_put_sync_suspend(gmi->dev);
+	pm_runtime_force_suspend(gmi->dev);
 }

 static int tegra_gmi_parse_dt(struct tegra_gmi *gmi)
@@ -213,6 +219,7 @@ static int tegra_gmi_probe(struct platform_device *pdev)
 	if (!gmi)
 		return -ENOMEM;

+	platform_set_drvdata(pdev, gmi);
 	gmi->dev = dev;

 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -232,6 +239,10 @@ static int tegra_gmi_probe(struct platform_device *pdev)
 		return PTR_ERR(gmi->rst);
 	}

+	err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+	if (err)
+		return err;
+
 	err = tegra_gmi_parse_dt(gmi);
 	if (err)
 		return err;
@@ -247,8 +258,6 @@ static int tegra_gmi_probe(struct platform_device *pdev)
 		return err;
 	}

-	platform_set_drvdata(pdev, gmi);
-
 	return 0;
 }
@@ -262,6 +271,34 @@ static int tegra_gmi_remove(struct platform_device *pdev)
 	return 0;
 }

+static int __maybe_unused tegra_gmi_runtime_resume(struct device *dev)
+{
+	struct tegra_gmi *gmi = dev_get_drvdata(dev);
+	int err;
+
+	err = clk_prepare_enable(gmi->clk);
+	if (err < 0) {
+		dev_err(gmi->dev, "failed to enable clock: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static int __maybe_unused tegra_gmi_runtime_suspend(struct device *dev)
+{
+	struct tegra_gmi *gmi = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(gmi->clk);
+
+	return 0;
+}
+
+static const struct dev_pm_ops tegra_gmi_pm = {
+	SET_RUNTIME_PM_OPS(tegra_gmi_runtime_suspend, tegra_gmi_runtime_resume,
+			   NULL)
+};
+
 static const struct of_device_id tegra_gmi_id_table[] = {
 	{ .compatible = "nvidia,tegra20-gmi", },
 	{ .compatible = "nvidia,tegra30-gmi", },
@@ -275,6 +312,7 @@ static struct platform_driver tegra_gmi_driver = {
 	.driver = {
 		.name		= "tegra-gmi",
 		.of_match_table	= tegra_gmi_id_table,
+		.pm = &tegra_gmi_pm,
 	},
 };
 module_platform_driver(tegra_gmi_driver);