author     Tony Lindgren <tony@atomide.com>   2020-06-16 09:25:03 -0700
committer  Tony Lindgren <tony@atomide.com>   2020-06-16 09:25:03 -0700
commit     07c7b547a79605f1041d55b84d91a4a4d9c5b363 (patch)
tree       d98c1adacc8c65b475c325b427e54b8205b0013d /drivers/bus
parent     e4a8fc054340f4df761f6a73335f8fdc0b7ac4fd (diff)
parent     b3a9e3b9622ae10064826dccb4f7a52bd88c7407 (diff)
Merge tag 'v5.8-rc1' into fixes
Linux 5.8-rc1
Diffstat (limited to 'drivers/bus')
-rw-r--r--  drivers/bus/Kconfig             |  41
-rw-r--r--  drivers/bus/Makefile            |   4
-rw-r--r--  drivers/bus/arm-integrator-lm.c | 128
-rw-r--r--  drivers/bus/bt1-apb.c           | 421
-rw-r--r--  drivers/bus/bt1-axi.c           | 314
-rw-r--r--  drivers/bus/mhi/core/boot.c     |  75
-rw-r--r--  drivers/bus/mhi/core/init.c     |   8
-rw-r--r--  drivers/bus/mhi/core/internal.h |   9
-rw-r--r--  drivers/bus/mhi/core/main.c     | 197
-rw-r--r--  drivers/bus/mhi/core/pm.c       | 229
-rw-r--r--  drivers/bus/ti-sysc.c           |  25
-rw-r--r--  drivers/bus/vexpress-config.c   | 354
12 files changed, 1571 insertions(+), 234 deletions(-)
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 6d4e4497b59b..c8818e3b1079 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -20,6 +20,15 @@ config ARM_CCI400_PORT_CTRL Low level power management driver for CCI400 cache coherent interconnect for ARM platforms. +config ARM_INTEGRATOR_LM + bool "ARM Integrator Logic Module bus" + depends on HAS_IOMEM + depends on ARCH_INTEGRATOR || COMPILE_TEST + default ARCH_INTEGRATOR + help + Say y here to enable support for the ARM Logic Module bus + found on the ARM Integrator AP (Application Platform) + config BRCMSTB_GISB_ARB bool "Broadcom STB GISB bus arbiter" depends on ARM || ARM64 || MIPS @@ -29,6 +38,36 @@ config BRCMSTB_GISB_ARB arbiter. This driver provides timeout and target abort error handling and internal bus master decoding. +config BT1_APB + bool "Baikal-T1 APB-bus driver" + depends on MIPS_BAIKAL_T1 || COMPILE_TEST + select REGMAP_MMIO + help + Baikal-T1 AXI-APB bridge is used to access the SoC subsystem CSRs. + IO requests are routed to this bus by means of the DW AMBA 3 AXI + Interconnect. In case of any APB protocol collisions, slave device + not responding on timeout an IRQ is raised with an erroneous address + reported to the APB terminator (APB Errors Handler Block). This + driver provides the interrupt handler to detect the erroneous + address, prints an error message about the address fault, updates an + errors counter. The counter and the APB-bus operations timeout can be + accessed via corresponding sysfs nodes. + +config BT1_AXI + bool "Baikal-T1 AXI-bus driver" + depends on MIPS_BAIKAL_T1 || COMPILE_TEST + select MFD_SYSCON + help + AXI3-bus is the main communication bus connecting all high-speed + peripheral IP-cores with RAM controller and with MIPS P5600 cores on + Baikal-T1 SoC. Traffic arbitration is done by means of DW AMBA 3 AXI + Interconnect (so called AXI Main Interconnect) routing IO requests + from one SoC block to another. This driver provides a way to detect + any bus protocol errors and device not responding situations by + means of an embedded on top of the interconnect errors handler + block (EHB). AXI Interconnect QoS arbitration tuning is currently + unsupported. + config MOXTET tristate "CZ.NIC Turris Mox module configuration bus" depends on SPI_MASTER && OF @@ -183,7 +222,7 @@ config UNIPHIER_SYSTEM_BUS needed to use on-board devices connected to UniPhier SoCs. 
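The BT1_APB help text above compresses a lot: the timeout the APB errors handler block enforces is programmed in APB reference-clock cycles, while the sysfs node the driver adds later in this diff reads and writes microseconds. Below is a minimal, userspace-buildable sketch of the same cycles/microseconds conversion that bt1_apb_n_to_timeout_us() and bt1_apb_timeout_to_n_us() perform further down; the 125 MHz clock rate is an illustrative assumption, not a documented Baikal-T1 figure.

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC	1000000ULL

/* APB reference-clock cycles -> microseconds (as in timeout_show). */
static uint64_t apb_n_to_timeout_us(uint64_t n, uint64_t rate_hz)
{
	return n * USEC_PER_SEC / rate_hz;
}

/* Microseconds -> cycles, the value written to APB_EHB_TIMEOUT. */
static uint64_t apb_timeout_us_to_n(uint64_t us, uint64_t rate_hz)
{
	return us * rate_hz / USEC_PER_SEC;
}

int main(void)
{
	uint64_t rate = 125000000ULL;	/* assumed APB clock rate, Hz */

	printf("max timeout (0xFFFFFFFF cycles): %llu us\n",
	       (unsigned long long)apb_n_to_timeout_us(0xFFFFFFFFULL, rate));
	printf("1000 us = %llu cycles\n",
	       (unsigned long long)apb_timeout_us_to_n(1000, rate));
	return 0;
}

Note that the real driver clamps the cycle count to the APB_EHB_TIMEOUT_MIN..APB_EHB_TIMEOUT_MAX register range before writing it, so out-of-range microsecond values are silently bounded.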
config VEXPRESS_CONFIG - bool "Versatile Express configuration bus" + tristate "Versatile Express configuration bus" default y if ARCH_VEXPRESS depends on ARM || ARM64 depends on OF diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile index 05f32cd694a4..397e35392bff 100644 --- a/drivers/bus/Makefile +++ b/drivers/bus/Makefile @@ -5,7 +5,7 @@ # Interconnect bus drivers for ARM platforms obj-$(CONFIG_ARM_CCI) += arm-cci.o - +obj-$(CONFIG_ARM_INTEGRATOR_LM) += arm-integrator-lm.o obj-$(CONFIG_HISILICON_LPC) += hisi_lpc.o obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o obj-$(CONFIG_MOXTET) += moxtet.o @@ -13,6 +13,8 @@ obj-$(CONFIG_MOXTET) += moxtet.o # DPAA2 fsl-mc bus obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/ +obj-$(CONFIG_BT1_APB) += bt1-apb.o +obj-$(CONFIG_BT1_AXI) += bt1-axi.o obj-$(CONFIG_IMX_WEIM) += imx-weim.o obj-$(CONFIG_MIPS_CDMM) += mips_cdmm.o obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o diff --git a/drivers/bus/arm-integrator-lm.c b/drivers/bus/arm-integrator-lm.c new file mode 100644 index 000000000000..845b6c43fef8 --- /dev/null +++ b/drivers/bus/arm-integrator-lm.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ARM Integrator Logical Module bus driver + * Copyright (C) 2020 Linaro Ltd. + * Author: Linus Walleij <linus.walleij@linaro.org> + * + * See the device tree bindings for this block for more details on the + * hardware. + */ + +#include <linux/module.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/platform_device.h> +#include <linux/bitops.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> + +/* All information about the connected logic modules are in here */ +#define INTEGRATOR_SC_DEC_OFFSET 0x10 + +/* Base address for the expansion modules */ +#define INTEGRATOR_AP_EXP_BASE 0xc0000000 +#define INTEGRATOR_AP_EXP_STRIDE 0x10000000 + +static int integrator_lm_populate(int num, struct device *dev) +{ + struct device_node *np = dev->of_node; + struct device_node *child; + u32 base; + int ret; + + base = INTEGRATOR_AP_EXP_BASE + (num * INTEGRATOR_AP_EXP_STRIDE); + + /* Walk over the child nodes and see what chipselects we use */ + for_each_available_child_of_node(np, child) { + struct resource res; + + ret = of_address_to_resource(child, 0, &res); + if (ret) { + dev_info(dev, "no valid address on child\n"); + continue; + } + + /* First populate the syscon then any devices */ + if (res.start == base) { + dev_info(dev, "populate module @0x%08x from DT\n", + base); + ret = of_platform_default_populate(child, NULL, dev); + if (ret) { + dev_err(dev, "failed to populate module\n"); + return ret; + } + } + } + + return 0; +} + +static const struct of_device_id integrator_ap_syscon_match[] = { + { .compatible = "arm,integrator-ap-syscon"}, + { }, +}; + +static int integrator_ap_lm_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *syscon; + static struct regmap *map; + u32 val; + int ret; + int i; + + /* Look up the system controller */ + syscon = of_find_matching_node(NULL, integrator_ap_syscon_match); + if (!syscon) { + dev_err(dev, + "could not find Integrator/AP system controller\n"); + return -ENODEV; + } + map = syscon_node_to_regmap(syscon); + if (IS_ERR(map)) { + dev_err(dev, + "could not find Integrator/AP system controller\n"); + return PTR_ERR(map); + } + + ret = regmap_read(map, INTEGRATOR_SC_DEC_OFFSET, &val); + if (ret) { + dev_err(dev, "could 
not read from Integrator/AP syscon\n"); + return ret; + } + + /* Loop over the connected modules */ + for (i = 0; i < 4; i++) { + if (!(val & BIT(4 + i))) + continue; + + dev_info(dev, "detected module in slot %d\n", i); + ret = integrator_lm_populate(i, dev); + if (ret) + return ret; + } + + return 0; +} + +static const struct of_device_id integrator_ap_lm_match[] = { + { .compatible = "arm,integrator-ap-lm"}, + { }, +}; + +static struct platform_driver integrator_ap_lm_driver = { + .probe = integrator_ap_lm_probe, + .driver = { + .name = "integratorap-lm", + .of_match_table = integrator_ap_lm_match, + }, +}; +module_platform_driver(integrator_ap_lm_driver); +MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>"); +MODULE_DESCRIPTION("Integrator AP Logical Module driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/bus/bt1-apb.c b/drivers/bus/bt1-apb.c new file mode 100644 index 000000000000..b25ff941e7c7 --- /dev/null +++ b/drivers/bus/bt1-apb.c @@ -0,0 +1,421 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Authors: + * Serge Semin <Sergey.Semin@baikalelectronics.ru> + * + * Baikal-T1 APB-bus driver + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/device.h> +#include <linux/atomic.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/nmi.h> +#include <linux/of.h> +#include <linux/regmap.h> +#include <linux/clk.h> +#include <linux/reset.h> +#include <linux/time64.h> +#include <linux/clk.h> +#include <linux/sysfs.h> + +#define APB_EHB_ISR 0x00 +#define APB_EHB_ISR_PENDING BIT(0) +#define APB_EHB_ISR_MASK BIT(1) +#define APB_EHB_ADDR 0x04 +#define APB_EHB_TIMEOUT 0x08 + +#define APB_EHB_TIMEOUT_MIN 0x000003FFU +#define APB_EHB_TIMEOUT_MAX 0xFFFFFFFFU + +/* + * struct bt1_apb - Baikal-T1 APB EHB private data + * @dev: Pointer to the device structure. + * @regs: APB EHB registers map. + * @res: No-device error injection memory region. + * @irq: Errors IRQ number. + * @rate: APB-bus reference clock rate. + * @pclk: APB-reference clock. + * @prst: APB domain reset line. + * @count: Number of errors detected. + */ +struct bt1_apb { + struct device *dev; + + struct regmap *regs; + void __iomem *res; + int irq; + + unsigned long rate; + struct clk *pclk; + + struct reset_control *prst; + + atomic_t count; +}; + +static const struct regmap_config bt1_apb_regmap_cfg = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = APB_EHB_TIMEOUT, + .fast_io = true +}; + +static inline unsigned long bt1_apb_n_to_timeout_us(struct bt1_apb *apb, u32 n) +{ + u64 timeout = (u64)n * USEC_PER_SEC; + + do_div(timeout, apb->rate); + + return timeout; + +} + +static inline unsigned long bt1_apb_timeout_to_n_us(struct bt1_apb *apb, + unsigned long timeout) +{ + u64 n = (u64)timeout * apb->rate; + + do_div(n, USEC_PER_SEC); + + return n; + +} + +static irqreturn_t bt1_apb_isr(int irq, void *data) +{ + struct bt1_apb *apb = data; + u32 addr = 0; + + regmap_read(apb->regs, APB_EHB_ADDR, &addr); + + dev_crit_ratelimited(apb->dev, + "APB-bus fault %d: Slave access timeout at 0x%08x\n", + atomic_inc_return(&apb->count), + addr); + + /* + * Print backtrace on each CPU. This might be pointless if the fault + * has happened on the same CPU as the IRQ handler is executed or + * the other core proceeded further execution despite the error. + * But if it's not, by looking at the trace we would get straight to + * the cause of the problem. 
+ */ + trigger_all_cpu_backtrace(); + + regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_PENDING, 0); + + return IRQ_HANDLED; +} + +static void bt1_apb_clear_data(void *data) +{ + struct bt1_apb *apb = data; + struct platform_device *pdev = to_platform_device(apb->dev); + + platform_set_drvdata(pdev, NULL); +} + +static struct bt1_apb *bt1_apb_create_data(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct bt1_apb *apb; + int ret; + + apb = devm_kzalloc(dev, sizeof(*apb), GFP_KERNEL); + if (!apb) + return ERR_PTR(-ENOMEM); + + ret = devm_add_action(dev, bt1_apb_clear_data, apb); + if (ret) { + dev_err(dev, "Can't add APB EHB data clear action\n"); + return ERR_PTR(ret); + } + + apb->dev = dev; + atomic_set(&apb->count, 0); + platform_set_drvdata(pdev, apb); + + return apb; +} + +static int bt1_apb_request_regs(struct bt1_apb *apb) +{ + struct platform_device *pdev = to_platform_device(apb->dev); + void __iomem *regs; + + regs = devm_platform_ioremap_resource_byname(pdev, "ehb"); + if (IS_ERR(regs)) { + dev_err(apb->dev, "Couldn't map APB EHB registers\n"); + return PTR_ERR(regs); + } + + apb->regs = devm_regmap_init_mmio(apb->dev, regs, &bt1_apb_regmap_cfg); + if (IS_ERR(apb->regs)) { + dev_err(apb->dev, "Couldn't create APB EHB regmap\n"); + return PTR_ERR(apb->regs); + } + + apb->res = devm_platform_ioremap_resource_byname(pdev, "nodev"); + if (IS_ERR(apb->res)) + dev_err(apb->dev, "Couldn't map reserved region\n"); + + return PTR_ERR_OR_ZERO(apb->res); +} + +static int bt1_apb_request_rst(struct bt1_apb *apb) +{ + int ret; + + apb->prst = devm_reset_control_get_optional_exclusive(apb->dev, "prst"); + if (IS_ERR(apb->prst)) { + dev_warn(apb->dev, "Couldn't get reset control line\n"); + return PTR_ERR(apb->prst); + } + + ret = reset_control_deassert(apb->prst); + if (ret) + dev_err(apb->dev, "Failed to deassert the reset line\n"); + + return ret; +} + +static void bt1_apb_disable_clk(void *data) +{ + struct bt1_apb *apb = data; + + clk_disable_unprepare(apb->pclk); +} + +static int bt1_apb_request_clk(struct bt1_apb *apb) +{ + int ret; + + apb->pclk = devm_clk_get(apb->dev, "pclk"); + if (IS_ERR(apb->pclk)) { + dev_err(apb->dev, "Couldn't get APB clock descriptor\n"); + return PTR_ERR(apb->pclk); + } + + ret = clk_prepare_enable(apb->pclk); + if (ret) { + dev_err(apb->dev, "Couldn't enable the APB clock\n"); + return ret; + } + + ret = devm_add_action_or_reset(apb->dev, bt1_apb_disable_clk, apb); + if (ret) { + dev_err(apb->dev, "Can't add APB EHB clocks disable action\n"); + return ret; + } + + apb->rate = clk_get_rate(apb->pclk); + if (!apb->rate) { + dev_err(apb->dev, "Invalid clock rate\n"); + return -EINVAL; + } + + return 0; +} + +static void bt1_apb_clear_irq(void *data) +{ + struct bt1_apb *apb = data; + + regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_MASK, 0); +} + +static int bt1_apb_request_irq(struct bt1_apb *apb) +{ + struct platform_device *pdev = to_platform_device(apb->dev); + int ret; + + apb->irq = platform_get_irq(pdev, 0); + if (apb->irq < 0) + return apb->irq; + + ret = devm_request_irq(apb->dev, apb->irq, bt1_apb_isr, IRQF_SHARED, + "bt1-apb", apb); + if (ret) { + dev_err(apb->dev, "Couldn't request APB EHB IRQ\n"); + return ret; + } + + ret = devm_add_action(apb->dev, bt1_apb_clear_irq, apb); + if (ret) { + dev_err(apb->dev, "Can't add APB EHB IRQs clear action\n"); + return ret; + } + + /* Unmask IRQ and clear it' pending flag. 
*/ + regmap_update_bits(apb->regs, APB_EHB_ISR, + APB_EHB_ISR_PENDING | APB_EHB_ISR_MASK, + APB_EHB_ISR_MASK); + + return 0; +} + +static ssize_t count_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct bt1_apb *apb = dev_get_drvdata(dev); + + return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&apb->count)); +} +static DEVICE_ATTR_RO(count); + +static ssize_t timeout_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct bt1_apb *apb = dev_get_drvdata(dev); + unsigned long timeout; + int ret; + u32 n; + + ret = regmap_read(apb->regs, APB_EHB_TIMEOUT, &n); + if (ret) + return ret; + + timeout = bt1_apb_n_to_timeout_us(apb, n); + + return scnprintf(buf, PAGE_SIZE, "%lu\n", timeout); +} + +static ssize_t timeout_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct bt1_apb *apb = dev_get_drvdata(dev); + unsigned long timeout; + int ret; + u32 n; + + if (kstrtoul(buf, 0, &timeout) < 0) + return -EINVAL; + + n = bt1_apb_timeout_to_n_us(apb, timeout); + n = clamp(n, APB_EHB_TIMEOUT_MIN, APB_EHB_TIMEOUT_MAX); + + ret = regmap_write(apb->regs, APB_EHB_TIMEOUT, n); + + return ret ?: count; +} +static DEVICE_ATTR_RW(timeout); + +static ssize_t inject_error_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "Error injection: nodev irq\n"); +} + +static ssize_t inject_error_store(struct device *dev, + struct device_attribute *attr, + const char *data, size_t count) +{ + struct bt1_apb *apb = dev_get_drvdata(dev); + + /* + * Either dummy read from the unmapped address in the APB IO area + * or manually set the IRQ status. + */ + if (sysfs_streq(data, "nodev")) + readl(apb->res); + else if (sysfs_streq(data, "irq")) + regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_PENDING, + APB_EHB_ISR_PENDING); + else + return -EINVAL; + + return count; +} +static DEVICE_ATTR_RW(inject_error); + +static struct attribute *bt1_apb_sysfs_attrs[] = { + &dev_attr_count.attr, + &dev_attr_timeout.attr, + &dev_attr_inject_error.attr, + NULL +}; +ATTRIBUTE_GROUPS(bt1_apb_sysfs); + +static void bt1_apb_remove_sysfs(void *data) +{ + struct bt1_apb *apb = data; + + device_remove_groups(apb->dev, bt1_apb_sysfs_groups); +} + +static int bt1_apb_init_sysfs(struct bt1_apb *apb) +{ + int ret; + + ret = device_add_groups(apb->dev, bt1_apb_sysfs_groups); + if (ret) { + dev_err(apb->dev, "Failed to create EHB APB sysfs nodes\n"); + return ret; + } + + ret = devm_add_action_or_reset(apb->dev, bt1_apb_remove_sysfs, apb); + if (ret) + dev_err(apb->dev, "Can't add APB EHB sysfs remove action\n"); + + return ret; +} + +static int bt1_apb_probe(struct platform_device *pdev) +{ + struct bt1_apb *apb; + int ret; + + apb = bt1_apb_create_data(pdev); + if (IS_ERR(apb)) + return PTR_ERR(apb); + + ret = bt1_apb_request_regs(apb); + if (ret) + return ret; + + ret = bt1_apb_request_rst(apb); + if (ret) + return ret; + + ret = bt1_apb_request_clk(apb); + if (ret) + return ret; + + ret = bt1_apb_request_irq(apb); + if (ret) + return ret; + + ret = bt1_apb_init_sysfs(apb); + if (ret) + return ret; + + return 0; +} + +static const struct of_device_id bt1_apb_of_match[] = { + { .compatible = "baikal,bt1-apb" }, + { } +}; +MODULE_DEVICE_TABLE(of, bt1_apb_of_match); + +static struct platform_driver bt1_apb_driver = { + .probe = bt1_apb_probe, + .driver = { + .name = "bt1-apb", + .of_match_table = bt1_apb_of_match + } +}; +module_platform_driver(bt1_apb_driver); + +MODULE_AUTHOR("Serge 
Semin <Sergey.Semin@baikalelectronics.ru>"); +MODULE_DESCRIPTION("Baikal-T1 APB-bus driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/bus/bt1-axi.c b/drivers/bus/bt1-axi.c new file mode 100644 index 000000000000..e7a6744acc7b --- /dev/null +++ b/drivers/bus/bt1-axi.c @@ -0,0 +1,314 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Authors: + * Serge Semin <Sergey.Semin@baikalelectronics.ru> + * + * Baikal-T1 AXI-bus driver + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/bitfield.h> +#include <linux/device.h> +#include <linux/atomic.h> +#include <linux/regmap.h> +#include <linux/platform_device.h> +#include <linux/mfd/syscon.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/nmi.h> +#include <linux/of.h> +#include <linux/clk.h> +#include <linux/reset.h> +#include <linux/sysfs.h> + +#define BT1_AXI_WERRL 0x110 +#define BT1_AXI_WERRH 0x114 +#define BT1_AXI_WERRH_TYPE BIT(23) +#define BT1_AXI_WERRH_ADDR_FLD 24 +#define BT1_AXI_WERRH_ADDR_MASK GENMASK(31, BT1_AXI_WERRH_ADDR_FLD) + +/* + * struct bt1_axi - Baikal-T1 AXI-bus private data + * @dev: Pointer to the device structure. + * @qos_regs: AXI Interconnect QoS tuning registers. + * @sys_regs: Baikal-T1 System Controller registers map. + * @irq: Errors IRQ number. + * @aclk: AXI reference clock. + * @arst: AXI Interconnect reset line. + * @count: Number of errors detected. + */ +struct bt1_axi { + struct device *dev; + + void __iomem *qos_regs; + struct regmap *sys_regs; + int irq; + + struct clk *aclk; + + struct reset_control *arst; + + atomic_t count; +}; + +static irqreturn_t bt1_axi_isr(int irq, void *data) +{ + struct bt1_axi *axi = data; + u32 low = 0, high = 0; + + regmap_read(axi->sys_regs, BT1_AXI_WERRL, &low); + regmap_read(axi->sys_regs, BT1_AXI_WERRH, &high); + + dev_crit_ratelimited(axi->dev, + "AXI-bus fault %d: %s at 0x%x%08x\n", + atomic_inc_return(&axi->count), + high & BT1_AXI_WERRH_TYPE ? "no slave" : "slave protocol error", + high, low); + + /* + * Print backtrace on each CPU. This might be pointless if the fault + * has happened on the same CPU as the IRQ handler is executed or + * the other core proceeded further execution despite the error. + * But if it's not, by looking at the trace we would get straight to + * the cause of the problem. 
+ */ + trigger_all_cpu_backtrace(); + + return IRQ_HANDLED; +} + +static void bt1_axi_clear_data(void *data) +{ + struct bt1_axi *axi = data; + struct platform_device *pdev = to_platform_device(axi->dev); + + platform_set_drvdata(pdev, NULL); +} + +static struct bt1_axi *bt1_axi_create_data(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct bt1_axi *axi; + int ret; + + axi = devm_kzalloc(dev, sizeof(*axi), GFP_KERNEL); + if (!axi) + return ERR_PTR(-ENOMEM); + + ret = devm_add_action(dev, bt1_axi_clear_data, axi); + if (ret) { + dev_err(dev, "Can't add AXI EHB data clear action\n"); + return ERR_PTR(ret); + } + + axi->dev = dev; + atomic_set(&axi->count, 0); + platform_set_drvdata(pdev, axi); + + return axi; +} + +static int bt1_axi_request_regs(struct bt1_axi *axi) +{ + struct platform_device *pdev = to_platform_device(axi->dev); + struct device *dev = axi->dev; + + axi->sys_regs = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon"); + if (IS_ERR(axi->sys_regs)) { + dev_err(dev, "Couldn't find syscon registers\n"); + return PTR_ERR(axi->sys_regs); + } + + axi->qos_regs = devm_platform_ioremap_resource_byname(pdev, "qos"); + if (IS_ERR(axi->qos_regs)) + dev_err(dev, "Couldn't map AXI-bus QoS registers\n"); + + return PTR_ERR_OR_ZERO(axi->qos_regs); +} + +static int bt1_axi_request_rst(struct bt1_axi *axi) +{ + int ret; + + axi->arst = devm_reset_control_get_optional_exclusive(axi->dev, "arst"); + if (IS_ERR(axi->arst)) { + dev_warn(axi->dev, "Couldn't get reset control line\n"); + return PTR_ERR(axi->arst); + } + + ret = reset_control_deassert(axi->arst); + if (ret) + dev_err(axi->dev, "Failed to deassert the reset line\n"); + + return ret; +} + +static void bt1_axi_disable_clk(void *data) +{ + struct bt1_axi *axi = data; + + clk_disable_unprepare(axi->aclk); +} + +static int bt1_axi_request_clk(struct bt1_axi *axi) +{ + int ret; + + axi->aclk = devm_clk_get(axi->dev, "aclk"); + if (IS_ERR(axi->aclk)) { + dev_err(axi->dev, "Couldn't get AXI Interconnect clock\n"); + return PTR_ERR(axi->aclk); + } + + ret = clk_prepare_enable(axi->aclk); + if (ret) { + dev_err(axi->dev, "Couldn't enable the AXI clock\n"); + return ret; + } + + ret = devm_add_action_or_reset(axi->dev, bt1_axi_disable_clk, axi); + if (ret) + dev_err(axi->dev, "Can't add AXI clock disable action\n"); + + return ret; +} + +static int bt1_axi_request_irq(struct bt1_axi *axi) +{ + struct platform_device *pdev = to_platform_device(axi->dev); + int ret; + + axi->irq = platform_get_irq(pdev, 0); + if (axi->irq < 0) + return axi->irq; + + ret = devm_request_irq(axi->dev, axi->irq, bt1_axi_isr, IRQF_SHARED, + "bt1-axi", axi); + if (ret) + dev_err(axi->dev, "Couldn't request AXI EHB IRQ\n"); + + return ret; +} + +static ssize_t count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bt1_axi *axi = dev_get_drvdata(dev); + + return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&axi->count)); +} +static DEVICE_ATTR_RO(count); + +static ssize_t inject_error_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "Error injection: bus unaligned\n"); +} + +static ssize_t inject_error_store(struct device *dev, + struct device_attribute *attr, + const char *data, size_t count) +{ + struct bt1_axi *axi = dev_get_drvdata(dev); + + /* + * Performing unaligned read from the memory will cause the CM2 bus + * error while unaligned writing - the AXI bus write error handled + * by this driver. 
+ */ + if (sysfs_streq(data, "bus")) + readb(axi->qos_regs); + else if (sysfs_streq(data, "unaligned")) + writeb(0, axi->qos_regs); + else + return -EINVAL; + + return count; +} +static DEVICE_ATTR_RW(inject_error); + +static struct attribute *bt1_axi_sysfs_attrs[] = { + &dev_attr_count.attr, + &dev_attr_inject_error.attr, + NULL +}; +ATTRIBUTE_GROUPS(bt1_axi_sysfs); + +static void bt1_axi_remove_sysfs(void *data) +{ + struct bt1_axi *axi = data; + + device_remove_groups(axi->dev, bt1_axi_sysfs_groups); +} + +static int bt1_axi_init_sysfs(struct bt1_axi *axi) +{ + int ret; + + ret = device_add_groups(axi->dev, bt1_axi_sysfs_groups); + if (ret) { + dev_err(axi->dev, "Failed to add sysfs files group\n"); + return ret; + } + + ret = devm_add_action_or_reset(axi->dev, bt1_axi_remove_sysfs, axi); + if (ret) + dev_err(axi->dev, "Can't add AXI EHB sysfs remove action\n"); + + return ret; +} + +static int bt1_axi_probe(struct platform_device *pdev) +{ + struct bt1_axi *axi; + int ret; + + axi = bt1_axi_create_data(pdev); + if (IS_ERR(axi)) + return PTR_ERR(axi); + + ret = bt1_axi_request_regs(axi); + if (ret) + return ret; + + ret = bt1_axi_request_rst(axi); + if (ret) + return ret; + + ret = bt1_axi_request_clk(axi); + if (ret) + return ret; + + ret = bt1_axi_request_irq(axi); + if (ret) + return ret; + + ret = bt1_axi_init_sysfs(axi); + if (ret) + return ret; + + return 0; +} + +static const struct of_device_id bt1_axi_of_match[] = { + { .compatible = "baikal,bt1-axi" }, + { } +}; +MODULE_DEVICE_TABLE(of, bt1_axi_of_match); + +static struct platform_driver bt1_axi_driver = { + .probe = bt1_axi_probe, + .driver = { + .name = "bt1-axi", + .of_match_table = bt1_axi_of_match + } +}; +module_platform_driver(bt1_axi_driver); + +MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>"); +MODULE_DESCRIPTION("Baikal-T1 AXI-bus driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c index ebad5eb48e5a..0b38014d040e 100644 --- a/drivers/bus/mhi/core/boot.c +++ b/drivers/bus/mhi/core/boot.c @@ -43,10 +43,7 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, lower_32_bits(mhi_buf->dma_addr)); mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len); - sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK; - - if (unlikely(!sequence_id)) - sequence_id = 1; + sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK); mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS, BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT, @@ -121,7 +118,8 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl) ee = mhi_get_exec_env(mhi_cntrl); } - dev_dbg(dev, "Waiting for image download completion, current EE: %s\n", + dev_dbg(dev, + "Waiting for RDDM image download via BHIe, current EE:%s\n", TO_MHI_EXEC_STR(ee)); while (retry--) { @@ -152,11 +150,14 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl) int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic) { void __iomem *base = mhi_cntrl->bhie; + struct device *dev = &mhi_cntrl->mhi_dev->dev; u32 rx_status; if (in_panic) return __mhi_download_rddm_in_panic(mhi_cntrl); + dev_dbg(dev, "Waiting for RDDM image download via BHIe\n"); + /* Wait for the image download to complete */ wait_event_timeout(mhi_cntrl->state_event, mhi_read_reg_field(mhi_cntrl, base, @@ -174,8 +175,10 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl, const struct mhi_buf *mhi_buf) { void __iomem *base = mhi_cntrl->bhie; + struct device 
*dev = &mhi_cntrl->mhi_dev->dev; rwlock_t *pm_lock = &mhi_cntrl->pm_lock; u32 tx_status, sequence_id; + int ret; read_lock_bh(pm_lock); if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { @@ -183,6 +186,9 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl, return -EIO; } + sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK); + dev_dbg(dev, "Starting AMSS download via BHIe. Sequence ID:%u\n", + sequence_id); mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS, upper_32_bits(mhi_buf->dma_addr)); @@ -191,26 +197,25 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl, mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len); - sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK; mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS, BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT, sequence_id); read_unlock_bh(pm_lock); /* Wait for the image download to complete */ - wait_event_timeout(mhi_cntrl->state_event, - MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || - mhi_read_reg_field(mhi_cntrl, base, - BHIE_TXVECSTATUS_OFFS, - BHIE_TXVECSTATUS_STATUS_BMSK, - BHIE_TXVECSTATUS_STATUS_SHFT, - &tx_status) || tx_status, - msecs_to_jiffies(mhi_cntrl->timeout_ms)); - - if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + ret = wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, + BHIE_TXVECSTATUS_OFFS, + BHIE_TXVECSTATUS_STATUS_BMSK, + BHIE_TXVECSTATUS_STATUS_SHFT, + &tx_status) || tx_status, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + tx_status != BHIE_TXVECSTATUS_STATUS_XFER_COMPL) return -EIO; - return (tx_status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO; + return (!ret) ? -ETIMEDOUT : 0; } static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl, @@ -239,14 +244,15 @@ static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl, goto invalid_pm_state; } - dev_dbg(dev, "Starting SBL download via BHI\n"); + session_id = MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK); + dev_dbg(dev, "Starting SBL download via BHI. 
Session ID:%u\n", + session_id); mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0); mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH, upper_32_bits(dma_addr)); mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW, lower_32_bits(dma_addr)); mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size); - session_id = prandom_u32() & BHI_TXDB_SEQNUM_BMSK; mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id); read_unlock_bh(pm_lock); @@ -377,30 +383,18 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl, } } -void mhi_fw_load_worker(struct work_struct *work) +void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) { - struct mhi_controller *mhi_cntrl; const struct firmware *firmware = NULL; struct image_info *image_info; - struct device *dev; + struct device *dev = &mhi_cntrl->mhi_dev->dev; const char *fw_name; void *buf; dma_addr_t dma_addr; size_t size; int ret; - mhi_cntrl = container_of(work, struct mhi_controller, fw_worker); - dev = &mhi_cntrl->mhi_dev->dev; - - dev_dbg(dev, "Waiting for device to enter PBL from: %s\n", - TO_MHI_EXEC_STR(mhi_cntrl->ee)); - - ret = wait_event_timeout(mhi_cntrl->state_event, - MHI_IN_PBL(mhi_cntrl->ee) || - MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), - msecs_to_jiffies(mhi_cntrl->timeout_ms)); - - if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { dev_err(dev, "Device MHI is not in valid state\n"); return; } @@ -446,7 +440,12 @@ void mhi_fw_load_worker(struct work_struct *work) release_firmware(firmware); /* Error or in EDL mode, we're done */ - if (ret || mhi_cntrl->ee == MHI_EE_EDL) + if (ret) { + dev_err(dev, "MHI did not load SBL, ret:%d\n", ret); + return; + } + + if (mhi_cntrl->ee == MHI_EE_EDL) return; write_lock_irq(&mhi_cntrl->pm_lock); @@ -474,8 +473,10 @@ fw_load_ee_pthru: if (!mhi_cntrl->fbc_download) return; - if (ret) + if (ret) { + dev_err(dev, "MHI did not enter READY state\n"); goto error_read; + } /* Wait for the SBL event */ ret = wait_event_timeout(mhi_cntrl->state_event, @@ -493,6 +494,8 @@ fw_load_ee_pthru: ret = mhi_fw_load_amss(mhi_cntrl, /* Vector table is the last entry */ &image_info->mhi_buf[image_info->entries - 1]); + if (ret) + dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret); release_firmware(firmware); diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c index 1f8c82603179..e43a190a7a36 100644 --- a/drivers/bus/mhi/core/init.c +++ b/drivers/bus/mhi/core/init.c @@ -34,6 +34,8 @@ const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = { [DEV_ST_TRANSITION_READY] = "READY", [DEV_ST_TRANSITION_SBL] = "SBL", [DEV_ST_TRANSITION_MISSION_MODE] = "MISSION_MODE", + [DEV_ST_TRANSITION_SYS_ERR] = "SYS_ERR", + [DEV_ST_TRANSITION_DISABLE] = "DISABLE", }; const char * const mhi_state_str[MHI_STATE_MAX] = { @@ -835,8 +837,6 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, spin_lock_init(&mhi_cntrl->transition_lock); spin_lock_init(&mhi_cntrl->wlock); INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker); - INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker); - INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker); init_waitqueue_head(&mhi_cntrl->state_event); mhi_cmd = mhi_cntrl->mhi_cmd; @@ -864,6 +864,10 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, mutex_init(&mhi_chan->mutex); init_completion(&mhi_chan->completion); rwlock_init(&mhi_chan->lock); + + /* used in setting bei field of TRE */ + mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; + mhi_chan->intmod = mhi_event->intmod; } if (mhi_cntrl->bounce_buf) { 
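A detail worth calling out in the boot.c hunks above: the old code drew a sequence/session ID with prandom_u32() & BMSK and then had to special-case the (legal) result of zero with "if (unlikely(!sequence_id)) sequence_id = 1;". The replacement, MHI_RANDOM_U32_NONZERO(), is defined in the internal.h hunk below as (prandom_u32_max(bmsk) + 1); since prandom_u32_max(ep_ro) returns a value in [0, ep_ro), the macro yields [1, bmsk] and is nonzero by construction. A small userspace sketch of that guarantee follows; the rand()-based reduction only stands in for the kernel PRNG (it assumes a glibc-style RAND_MAX of 2^31 - 1), and the mask value is illustrative rather than the real BHIE SEQNUM mask.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Userspace stand-in for the kernel's prandom_u32_max(ep_ro), which
 * returns a pseudo-random number in [0, ep_ro) via a multiply-shift
 * reduction. Assumes RAND_MAX == 2^31 - 1 (glibc).
 */
static uint32_t prandom_u32_max(uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)rand() * ep_ro) >> 31);
}

/* Mirrors the macro added to drivers/bus/mhi/core/internal.h. */
#define MHI_RANDOM_U32_NONZERO(bmsk)	(prandom_u32_max(bmsk) + 1)

int main(void)
{
	uint32_t bmsk = 0x3fffffff;	/* illustrative SEQNUM-style mask */
	int i;

	for (i = 0; i < 5; i++)
		/* Always in [1, bmsk]: no zero special case needed. */
		printf("sequence_id = %u\n", MHI_RANDOM_U32_NONZERO(bmsk));
	return 0;
}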
diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h index 095d95bc0e37..b1f640b75a94 100644 --- a/drivers/bus/mhi/core/internal.h +++ b/drivers/bus/mhi/core/internal.h @@ -386,6 +386,8 @@ enum dev_st_transition { DEV_ST_TRANSITION_READY, DEV_ST_TRANSITION_SBL, DEV_ST_TRANSITION_MISSION_MODE, + DEV_ST_TRANSITION_SYS_ERR, + DEV_ST_TRANSITION_DISABLE, DEV_ST_TRANSITION_MAX, }; @@ -452,6 +454,7 @@ enum mhi_pm_state { #define PRIMARY_CMD_RING 0 #define MHI_DEV_WAKE_DB 127 #define MHI_MAX_MTU 0xffff +#define MHI_RANDOM_U32_NONZERO(bmsk) (prandom_u32_max(bmsk) + 1) enum mhi_er_type { MHI_ER_TYPE_INVALID = 0x0, @@ -586,7 +589,7 @@ enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl); int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, enum dev_st_transition state); void mhi_pm_st_worker(struct work_struct *work); -void mhi_pm_sys_err_worker(struct work_struct *work); +void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl); void mhi_fw_load_worker(struct work_struct *work); int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl); void mhi_ctrl_ev_task(unsigned long data); @@ -627,6 +630,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl); void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl); void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, struct image_info *img_info); +void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl); int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan); int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, @@ -670,8 +674,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev); irqreturn_t mhi_intvec_handler(int irq_number, void *dev); int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, - void *buf, void *cb, size_t buf_len, enum mhi_flags flags); - + struct mhi_buf_info *info, enum mhi_flags flags); int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, struct mhi_buf_info *buf_info); int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c index 97e06cc586e4..1f622ce6be8b 100644 --- a/drivers/bus/mhi/core/main.c +++ b/drivers/bus/mhi/core/main.c @@ -258,7 +258,7 @@ int mhi_destroy_device(struct device *dev, void *data) return 0; } -static void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason) +void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason) { struct mhi_driver *mhi_drv; @@ -270,6 +270,7 @@ static void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason) if (mhi_drv->status_cb) mhi_drv->status_cb(mhi_dev, cb_reason); } +EXPORT_SYMBOL_GPL(mhi_notify); /* Bind MHI channels to MHI devices */ void mhi_create_devices(struct mhi_controller *mhi_cntrl) @@ -368,30 +369,37 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev) return IRQ_HANDLED; } -irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev) +irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv) { - struct mhi_controller *mhi_cntrl = dev; + struct mhi_controller *mhi_cntrl = priv; + struct device *dev = &mhi_cntrl->mhi_dev->dev; enum mhi_state state = MHI_STATE_MAX; enum mhi_pm_state pm_state = 0; enum mhi_ee_type ee = 0; write_lock_irq(&mhi_cntrl->pm_lock); - if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { - state = mhi_get_mhi_state(mhi_cntrl); - ee = mhi_cntrl->ee; - mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + 
write_unlock_irq(&mhi_cntrl->pm_lock); + goto exit_intvec; } + state = mhi_get_mhi_state(mhi_cntrl); + ee = mhi_cntrl->ee; + mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl); + dev_dbg(dev, "local ee:%s device ee:%s dev_state:%s\n", + TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee), + TO_MHI_STATE_STR(state)); + if (state == MHI_STATE_SYS_ERR) { - dev_dbg(&mhi_cntrl->mhi_dev->dev, "System error detected\n"); + dev_dbg(dev, "System error detected\n"); pm_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_DETECT); } write_unlock_irq(&mhi_cntrl->pm_lock); - /* If device in RDDM don't bother processing SYS error */ - if (mhi_cntrl->ee == MHI_EE_RDDM) { - if (mhi_cntrl->ee != ee) { + /* If device supports RDDM don't bother processing SYS error */ + if (mhi_cntrl->rddm_image) { + if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) { mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); wake_up_all(&mhi_cntrl->state_event); } @@ -405,7 +413,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev) if (MHI_IN_PBL(ee)) mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR); else - schedule_work(&mhi_cntrl->syserr_worker); + mhi_pm_sys_err_handler(mhi_cntrl); } exit_intvec: @@ -513,7 +521,10 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, mhi_cntrl->unmap_single(mhi_cntrl, buf_info); result.buf_addr = buf_info->cb_buf; - result.bytes_xferd = xfer_len; + + /* truncate to buf len if xfer_len is larger */ + result.bytes_xferd = + min_t(u16, xfer_len, buf_info->len); mhi_del_ring_element(mhi_cntrl, buf_ring); mhi_del_ring_element(mhi_cntrl, tre_ring); local_rp = tre_ring->rp; @@ -597,7 +608,9 @@ static int parse_rsc_event(struct mhi_controller *mhi_cntrl, result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ? -EOVERFLOW : 0; - result.bytes_xferd = xfer_len; + + /* truncate to buf len if xfer_len is larger */ + result.bytes_xferd = min_t(u16, xfer_len, buf_info->len); result.buf_addr = buf_info->cb_buf; result.dir = mhi_chan->dir; @@ -722,13 +735,18 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, { enum mhi_pm_state new_state; + /* skip SYS_ERROR handling if RDDM supported */ + if (mhi_cntrl->ee == MHI_EE_RDDM || + mhi_cntrl->rddm_image) + break; + dev_dbg(dev, "System error detected\n"); write_lock_irq(&mhi_cntrl->pm_lock); new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_DETECT); write_unlock_irq(&mhi_cntrl->pm_lock); if (new_state == MHI_PM_SYS_ERR_DETECT) - schedule_work(&mhi_cntrl->syserr_worker); + mhi_pm_sys_err_handler(mhi_cntrl); break; } default: @@ -774,9 +792,18 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, } case MHI_PKT_TYPE_TX_EVENT: chan = MHI_TRE_GET_EV_CHID(local_rp); - mhi_chan = &mhi_cntrl->mhi_chan[chan]; - parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); - event_quota--; + + WARN_ON(chan >= mhi_cntrl->max_chan); + + /* + * Only process the event ring elements whose channel + * ID is within the maximum supported range. 
+ */ + if (chan < mhi_cntrl->max_chan) { + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); + event_quota--; + } break; default: dev_err(dev, "Unhandled event type: %d\n", type); @@ -819,14 +846,23 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp); chan = MHI_TRE_GET_EV_CHID(local_rp); - mhi_chan = &mhi_cntrl->mhi_chan[chan]; - - if (likely(type == MHI_PKT_TYPE_TX_EVENT)) { - parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); - event_quota--; - } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) { - parse_rsc_event(mhi_cntrl, local_rp, mhi_chan); - event_quota--; + + WARN_ON(chan >= mhi_cntrl->max_chan); + + /* + * Only process the event ring elements whose channel + * ID is within the maximum supported range. + */ + if (chan < mhi_cntrl->max_chan) { + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + + if (likely(type == MHI_PKT_TYPE_TX_EVENT)) { + parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); + event_quota--; + } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) { + parse_rsc_event(mhi_cntrl, local_rp, mhi_chan); + event_quota--; + } } mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); @@ -896,7 +932,7 @@ void mhi_ctrl_ev_task(unsigned long data) } write_unlock_irq(&mhi_cntrl->pm_lock); if (pm_state == MHI_PM_SYS_ERR_DETECT) - schedule_work(&mhi_cntrl->syserr_worker); + mhi_pm_sys_err_handler(mhi_cntrl); } } @@ -918,9 +954,7 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir, struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : mhi_dev->dl_chan; struct mhi_ring *tre_ring = &mhi_chan->tre_ring; - struct mhi_ring *buf_ring = &mhi_chan->buf_ring; - struct mhi_buf_info *buf_info; - struct mhi_tre *mhi_tre; + struct mhi_buf_info buf_info = { }; int ret; /* If MHI host pre-allocates buffers then client drivers cannot queue */ @@ -945,27 +979,15 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir, /* Toggle wake to exit out of M2 */ mhi_cntrl->wake_toggle(mhi_cntrl); - /* Generate the TRE */ - buf_info = buf_ring->wp; + buf_info.v_addr = skb->data; + buf_info.cb_buf = skb; + buf_info.len = len; - buf_info->v_addr = skb->data; - buf_info->cb_buf = skb; - buf_info->wp = tre_ring->wp; - buf_info->dir = mhi_chan->dir; - buf_info->len = len; - ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); - if (ret) - goto map_error; - - mhi_tre = tre_ring->wp; - - mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); - mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len); - mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0); - - /* increment WP */ - mhi_add_ring_element(mhi_cntrl, tre_ring); - mhi_add_ring_element(mhi_cntrl, buf_ring); + ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags); + if (unlikely(ret)) { + read_unlock_bh(&mhi_cntrl->pm_lock); + return ret; + } if (mhi_chan->dir == DMA_TO_DEVICE) atomic_inc(&mhi_cntrl->pending_pkts); @@ -979,11 +1001,6 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir, read_unlock_bh(&mhi_cntrl->pm_lock); return 0; - -map_error: - read_unlock_bh(&mhi_cntrl->pm_lock); - - return ret; } EXPORT_SYMBOL_GPL(mhi_queue_skb); @@ -995,9 +1012,8 @@ int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir, mhi_dev->dl_chan; struct device *dev = &mhi_cntrl->mhi_dev->dev; struct mhi_ring *tre_ring = &mhi_chan->tre_ring; - struct mhi_ring *buf_ring = &mhi_chan->buf_ring; - struct mhi_buf_info *buf_info; - struct mhi_tre *mhi_tre; + struct mhi_buf_info buf_info = { }; + int 
ret; /* If MHI host pre-allocates buffers then client drivers cannot queue */ if (mhi_chan->pre_alloc) @@ -1024,25 +1040,16 @@ int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir, /* Toggle wake to exit out of M2 */ mhi_cntrl->wake_toggle(mhi_cntrl); - /* Generate the TRE */ - buf_info = buf_ring->wp; - WARN_ON(buf_info->used); - buf_info->p_addr = mhi_buf->dma_addr; - buf_info->pre_mapped = true; - buf_info->cb_buf = mhi_buf; - buf_info->wp = tre_ring->wp; - buf_info->dir = mhi_chan->dir; - buf_info->len = len; - - mhi_tre = tre_ring->wp; - - mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); - mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len); - mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0); + buf_info.p_addr = mhi_buf->dma_addr; + buf_info.cb_buf = mhi_buf; + buf_info.pre_mapped = true; + buf_info.len = len; - /* increment WP */ - mhi_add_ring_element(mhi_cntrl, tre_ring); - mhi_add_ring_element(mhi_cntrl, buf_ring); + ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags); + if (unlikely(ret)) { + read_unlock_bh(&mhi_cntrl->pm_lock); + return ret; + } if (mhi_chan->dir == DMA_TO_DEVICE) atomic_inc(&mhi_cntrl->pending_pkts); @@ -1060,7 +1067,7 @@ int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir, EXPORT_SYMBOL_GPL(mhi_queue_dma); int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, - void *buf, void *cb, size_t buf_len, enum mhi_flags flags) + struct mhi_buf_info *info, enum mhi_flags flags) { struct mhi_ring *buf_ring, *tre_ring; struct mhi_tre *mhi_tre; @@ -1072,15 +1079,22 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, tre_ring = &mhi_chan->tre_ring; buf_info = buf_ring->wp; - buf_info->v_addr = buf; - buf_info->cb_buf = cb; + WARN_ON(buf_info->used); + buf_info->pre_mapped = info->pre_mapped; + if (info->pre_mapped) + buf_info->p_addr = info->p_addr; + else + buf_info->v_addr = info->v_addr; + buf_info->cb_buf = info->cb_buf; buf_info->wp = tre_ring->wp; buf_info->dir = mhi_chan->dir; - buf_info->len = buf_len; + buf_info->len = info->len; - ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); - if (ret) - return ret; + if (!info->pre_mapped) { + ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); + if (ret) + return ret; + } eob = !!(flags & MHI_EOB); eot = !!(flags & MHI_EOT); @@ -1089,7 +1103,7 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, mhi_tre = tre_ring->wp; mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); - mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_len); + mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len); mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain); /* increment WP */ @@ -1106,6 +1120,7 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir, struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? 
mhi_dev->ul_chan : mhi_dev->dl_chan; struct mhi_ring *tre_ring; + struct mhi_buf_info buf_info = { }; unsigned long flags; int ret; @@ -1121,7 +1136,11 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir, if (mhi_is_ring_full(mhi_cntrl, tre_ring)) return -ENOMEM; - ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf, len, mflags); + buf_info.v_addr = buf; + buf_info.cb_buf = buf; + buf_info.len = len; + + ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags); if (unlikely(ret)) return ret; @@ -1322,7 +1341,7 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, while (nr_el--) { void *buf; - + struct mhi_buf_info info = { }; buf = kmalloc(len, GFP_KERNEL); if (!buf) { ret = -ENOMEM; @@ -1330,8 +1349,10 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, } /* Prepare transfer descriptors */ - ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf, - len, MHI_EOT); + info.v_addr = buf; + info.cb_buf = buf; + info.len = len; + ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT); if (ret) { kfree(buf); goto error_pre_alloc; diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c index dc83d65f7784..796098078083 100644 --- a/drivers/bus/mhi/core/pm.c +++ b/drivers/bus/mhi/core/pm.c @@ -288,14 +288,18 @@ int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl) for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { struct mhi_ring *tre_ring = &mhi_chan->tre_ring; - write_lock_irq(&mhi_chan->lock); - if (mhi_chan->db_cfg.reset_req) + if (mhi_chan->db_cfg.reset_req) { + write_lock_irq(&mhi_chan->lock); mhi_chan->db_cfg.db_mode = true; + write_unlock_irq(&mhi_chan->lock); + } + + read_lock_irq(&mhi_chan->lock); /* Only ring DB if ring is not empty */ if (tre_ring->base && tre_ring->wp != tre_ring->rp) mhi_ring_chan_db(mhi_cntrl, mhi_chan); - write_unlock_irq(&mhi_chan->lock); + read_unlock_irq(&mhi_chan->lock); } mhi_cntrl->wake_put(mhi_cntrl, false); @@ -449,19 +453,8 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl, to_mhi_pm_state_str(transition_state)); /* We must notify MHI control driver so it can clean up first */ - if (transition_state == MHI_PM_SYS_ERR_PROCESS) { - /* - * If controller supports RDDM, we do not process - * SYS error state, instead we will jump directly - * to RDDM state - */ - if (mhi_cntrl->rddm_image) { - dev_dbg(dev, - "Controller supports RDDM, so skip SYS_ERR\n"); - return; - } + if (transition_state == MHI_PM_SYS_ERR_PROCESS) mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR); - } mutex_lock(&mhi_cntrl->pm_mutex); write_lock_irq(&mhi_cntrl->pm_lock); @@ -527,8 +520,6 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl, mutex_unlock(&mhi_cntrl->pm_mutex); dev_dbg(dev, "Waiting for all pending threads to complete\n"); wake_up_all(&mhi_cntrl->state_event); - flush_work(&mhi_cntrl->st_worker); - flush_work(&mhi_cntrl->fw_worker); dev_dbg(dev, "Reset all active channels and remove MHI devices\n"); device_for_each_child(mhi_cntrl->cntrl_dev, NULL, mhi_destroy_device); @@ -608,13 +599,17 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, } /* SYS_ERR worker */ -void mhi_pm_sys_err_worker(struct work_struct *work) +void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl) { - struct mhi_controller *mhi_cntrl = container_of(work, - struct mhi_controller, - syserr_worker); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + /* skip if controller supports RDDM */ + if (mhi_cntrl->rddm_image) { + dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n"); + 
return; + } - mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS); + mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR); } /* Device State Transition worker */ @@ -643,7 +638,7 @@ void mhi_pm_st_worker(struct work_struct *work) mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl); write_unlock_irq(&mhi_cntrl->pm_lock); if (MHI_IN_PBL(mhi_cntrl->ee)) - wake_up_all(&mhi_cntrl->state_event); + mhi_fw_load_handler(mhi_cntrl); break; case DEV_ST_TRANSITION_SBL: write_lock_irq(&mhi_cntrl->pm_lock); @@ -662,6 +657,14 @@ void mhi_pm_st_worker(struct work_struct *work) case DEV_ST_TRANSITION_READY: mhi_ready_state_transition(mhi_cntrl); break; + case DEV_ST_TRANSITION_SYS_ERR: + mhi_pm_disable_transition + (mhi_cntrl, MHI_PM_SYS_ERR_PROCESS); + break; + case DEV_ST_TRANSITION_DISABLE: + mhi_pm_disable_transition + (mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS); + break; default: break; } @@ -669,6 +672,149 @@ void mhi_pm_st_worker(struct work_struct *work) } } +int mhi_pm_suspend(struct mhi_controller *mhi_cntrl) +{ + struct mhi_chan *itr, *tmp; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_pm_state new_state; + int ret; + + if (mhi_cntrl->pm_state == MHI_PM_DISABLE) + return -EINVAL; + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + /* Return busy if there are any pending resources */ + if (atomic_read(&mhi_cntrl->dev_wake)) + return -EBUSY; + + /* Take MHI out of M2 state */ + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M0 || + mhi_cntrl->dev_state == MHI_STATE_M1 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + dev_err(dev, + "Could not enter M0/M1 state"); + return -EIO; + } + + write_lock_irq(&mhi_cntrl->pm_lock); + + if (atomic_read(&mhi_cntrl->dev_wake)) { + write_unlock_irq(&mhi_cntrl->pm_lock); + return -EBUSY; + } + + dev_info(dev, "Allowing M3 transition\n"); + new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER); + if (new_state != MHI_PM_M3_ENTER) { + write_unlock_irq(&mhi_cntrl->pm_lock); + dev_err(dev, + "Error setting to PM state: %s from: %s\n", + to_mhi_pm_state_str(MHI_PM_M3_ENTER), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + /* Set MHI to M3 and wait for completion */ + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3); + write_unlock_irq(&mhi_cntrl->pm_lock); + dev_info(dev, "Wait for M3 completion\n"); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M3 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + dev_err(dev, + "Did not enter M3 state, MHI state: %s, PM state: %s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + /* Notify clients about entering LPM */ + list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { + mutex_lock(&itr->mutex); + if (itr->mhi_dev) + mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER); + mutex_unlock(&itr->mutex); + } + + return 0; +} +EXPORT_SYMBOL_GPL(mhi_pm_suspend); + +int mhi_pm_resume(struct mhi_controller *mhi_cntrl) +{ + struct mhi_chan *itr, *tmp; + struct device *dev = 
&mhi_cntrl->mhi_dev->dev; + enum mhi_pm_state cur_state; + int ret; + + dev_info(dev, "Entered with PM state: %s, MHI state: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + + if (mhi_cntrl->pm_state == MHI_PM_DISABLE) + return 0; + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + /* Notify clients about exiting LPM */ + list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { + mutex_lock(&itr->mutex); + if (itr->mhi_dev) + mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT); + mutex_unlock(&itr->mutex); + } + + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT); + if (cur_state != MHI_PM_M3_EXIT) { + write_unlock_irq(&mhi_cntrl->pm_lock); + dev_info(dev, + "Error setting to PM state: %s from: %s\n", + to_mhi_pm_state_str(MHI_PM_M3_EXIT), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + /* Set MHI to M0 and wait for completion */ + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0); + write_unlock_irq(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M0 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + dev_err(dev, + "Did not enter M0 state, MHI state: %s, PM state: %s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL_GPL(mhi_pm_resume); + int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl) { int ret; @@ -760,6 +906,7 @@ static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl, int mhi_async_power_up(struct mhi_controller *mhi_cntrl) { + enum mhi_state state; enum mhi_ee_type current_ee; enum dev_st_transition next_state; struct device *dev = &mhi_cntrl->mhi_dev->dev; @@ -829,13 +976,36 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl) goto error_bhi_offset; } + state = mhi_get_mhi_state(mhi_cntrl); + if (state == MHI_STATE_SYS_ERR) { + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); + ret = wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, + mhi_cntrl->regs, + MHICTRL, + MHICTRL_RESET_MASK, + MHICTRL_RESET_SHIFT, + &val) || + !val, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (ret) { + ret = -EIO; + dev_info(dev, "Failed to reset MHI due to syserr state\n"); + goto error_bhi_offset; + } + + /* + * device cleares INTVEC as part of RESET processing, + * re-program it + */ + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); + } + /* Transition to next state */ next_state = MHI_IN_PBL(current_ee) ? 
DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY; - if (next_state == DEV_ST_TRANSITION_PBL) - schedule_work(&mhi_cntrl->fw_worker); - mhi_queue_state_transition(mhi_cntrl, next_state); mutex_unlock(&mhi_cntrl->pm_mutex); @@ -876,7 +1046,12 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT), to_mhi_pm_state_str(mhi_cntrl->pm_state)); } - mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS); + + mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE); + + /* Wait for shutdown to complete */ + flush_work(&mhi_cntrl->st_worker); + mhi_deinit_free_irq(mhi_cntrl); if (!mhi_cntrl->pre_init) { diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index db9541f38505..bb54fb514e40 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -1339,13 +1339,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK_LEGACY_IDLE), SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff, SYSC_QUIRK_LEGACY_IDLE), - SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, - 0), - /* Some timers on omap4 and later */ - SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff, - 0), - SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff, - 0), SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff, @@ -1468,6 +1461,13 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0), SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0), SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0), + SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000013, 0xffffffff, 0), + SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, 0), + /* Some timers on omap4 and later */ + SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff, 0), + SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff, 0), + SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000040, 0xffffffff, 0), + SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000011, 0xffffffff, 0), SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0), SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0), SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0), @@ -2794,6 +2794,17 @@ static int sysc_init_soc(struct sysc *ddata) if (match && match->data) sysc_soc->soc = (int)match->data; + /* Ignore devices that are not available on HS and EMU SoCs */ + if (!sysc_soc->general_purpose) { + switch (sysc_soc->soc) { + case SOC_3430 ... 
SOC_3630: + sysc_add_disabled(0x48304000); /* timer12 */ + break; + default: + break; + } + } + match = soc_device_match(sysc_soc_feat_match); if (!match) return 0; diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c index ff70575b2db6..a58ac0c8e282 100644 --- a/drivers/bus/vexpress-config.c +++ b/drivers/bus/vexpress-config.c @@ -6,10 +6,61 @@ #include <linux/err.h> #include <linux/init.h> +#include <linux/io.h> +#include <linux/module.h> #include <linux/of.h> +#include <linux/platform_device.h> #include <linux/of_device.h> +#include <linux/sched/signal.h> +#include <linux/slab.h> #include <linux/vexpress.h> +#define SYS_MISC 0x0 +#define SYS_MISC_MASTERSITE (1 << 14) + +#define SYS_PROCID0 0x24 +#define SYS_PROCID1 0x28 +#define SYS_HBI_MASK 0xfff +#define SYS_PROCIDx_HBI_SHIFT 0 + +#define SYS_CFGDATA 0x40 + +#define SYS_CFGCTRL 0x44 +#define SYS_CFGCTRL_START (1 << 31) +#define SYS_CFGCTRL_WRITE (1 << 30) +#define SYS_CFGCTRL_DCC(n) (((n) & 0xf) << 26) +#define SYS_CFGCTRL_FUNC(n) (((n) & 0x3f) << 20) +#define SYS_CFGCTRL_SITE(n) (((n) & 0x3) << 16) +#define SYS_CFGCTRL_POSITION(n) (((n) & 0xf) << 12) +#define SYS_CFGCTRL_DEVICE(n) (((n) & 0xfff) << 0) + +#define SYS_CFGSTAT 0x48 +#define SYS_CFGSTAT_ERR (1 << 1) +#define SYS_CFGSTAT_COMPLETE (1 << 0) + +#define VEXPRESS_SITE_MB 0 +#define VEXPRESS_SITE_DB1 1 +#define VEXPRESS_SITE_DB2 2 +#define VEXPRESS_SITE_MASTER 0xf + +struct vexpress_syscfg { + struct device *dev; + void __iomem *base; + struct list_head funcs; +}; + +struct vexpress_syscfg_func { + struct list_head list; + struct vexpress_syscfg *syscfg; + struct regmap *regmap; + int num_templates; + u32 template[]; /* Keep it last! */ +}; + +struct vexpress_config_bridge_ops { + struct regmap * (*regmap_init)(struct device *dev, void *context); + void (*regmap_exit)(struct regmap *regmap, void *context); +}; struct vexpress_config_bridge { struct vexpress_config_bridge_ops *ops; @@ -18,26 +69,20 @@ struct vexpress_config_bridge { static DEFINE_MUTEX(vexpress_config_mutex); -static struct class *vexpress_config_class; static u32 vexpress_config_site_master = VEXPRESS_SITE_MASTER; -void vexpress_config_set_master(u32 site) +static void vexpress_config_set_master(u32 site) { vexpress_config_site_master = site; } -u32 vexpress_config_get_master(void) -{ - return vexpress_config_site_master; -} - -void vexpress_config_lock(void *arg) +static void vexpress_config_lock(void *arg) { mutex_lock(&vexpress_config_mutex); } -void vexpress_config_unlock(void *arg) +static void vexpress_config_unlock(void *arg) { mutex_unlock(&vexpress_config_mutex); } @@ -59,7 +104,7 @@ static void vexpress_config_find_prop(struct device_node *node, } } -int vexpress_config_get_topo(struct device_node *node, u32 *site, +static int vexpress_config_get_topo(struct device_node *node, u32 *site, u32 *position, u32 *dcc) { vexpress_config_find_prop(node, "arm,vexpress,site", site); @@ -88,9 +133,6 @@ struct regmap *devm_regmap_init_vexpress_config(struct device *dev) struct regmap *regmap; struct regmap **res; - if (WARN_ON(dev->parent->class != vexpress_config_class)) - return ERR_PTR(-ENODEV); - bridge = dev_get_drvdata(dev->parent); if (WARN_ON(!bridge)) return ERR_PTR(-EINVAL); @@ -113,91 +155,265 @@ struct regmap *devm_regmap_init_vexpress_config(struct device *dev) } EXPORT_SYMBOL_GPL(devm_regmap_init_vexpress_config); -struct device *vexpress_config_bridge_register(struct device *parent, - struct vexpress_config_bridge_ops *ops, void *context)
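vexpress_syscfg_exec() below starts a transaction by writing a command word assembled from the SYS_CFGCTRL_* fields defined at the top of this file. A hypothetical standalone helper (demo_cfgctrl_word() is an illustrative name, not part of this patch) showing how such a word is put together:

/* Compose a SYS_CFGCTRL command word from the bit fields defined above;
 * START kicks off the transaction and WRITE selects the direction, the
 * same way vexpress_syscfg_exec() uses its per-function templates.
 */
static u32 demo_cfgctrl_word(u32 dcc, u32 function, u32 site, u32 position,
			     u32 device, bool write)
{
	u32 command = SYS_CFGCTRL_DCC(dcc) |
		      SYS_CFGCTRL_FUNC(function) |
		      SYS_CFGCTRL_SITE(site) |
		      SYS_CFGCTRL_POSITION(position) |
		      SYS_CFGCTRL_DEVICE(device);

	command |= SYS_CFGCTRL_START;
	if (write)
		command |= SYS_CFGCTRL_WRITE;

	return command;
}

+static int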
vexpress_syscfg_exec(struct vexpress_syscfg_func *func, + int index, bool write, u32 *data) { - struct device *dev; - struct vexpress_config_bridge *bridge; + struct vexpress_syscfg *syscfg = func->syscfg; + u32 command, status; + int tries; + long timeout; - if (!vexpress_config_class) { - vexpress_config_class = class_create(THIS_MODULE, - "vexpress-config"); - if (IS_ERR(vexpress_config_class)) - return (void *)vexpress_config_class; + if (WARN_ON(index >= func->num_templates)) + return -EINVAL; + + command = readl(syscfg->base + SYS_CFGCTRL); + if (WARN_ON(command & SYS_CFGCTRL_START)) + return -EBUSY; + + command = func->template[index]; + command |= SYS_CFGCTRL_START; + command |= write ? SYS_CFGCTRL_WRITE : 0; + + /* Use a canary for reads */ + if (!write) + *data = 0xdeadbeef; + + dev_dbg(syscfg->dev, "func %p, command %x, data %x\n", + func, command, *data); + writel(*data, syscfg->base + SYS_CFGDATA); + writel(0, syscfg->base + SYS_CFGSTAT); + writel(command, syscfg->base + SYS_CFGCTRL); + mb(); + + /* The operation can take ages... Go to sleep, 100us initially */ + tries = 100; + timeout = 100; + do { + if (!irqs_disabled()) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(usecs_to_jiffies(timeout)); + if (signal_pending(current)) + return -EINTR; + } else { + udelay(timeout); + } + + status = readl(syscfg->base + SYS_CFGSTAT); + if (status & SYS_CFGSTAT_ERR) + return -EFAULT; + + if (timeout < 2000) + timeout *= 2; + } while (--tries && !(status & SYS_CFGSTAT_COMPLETE)); + if (WARN_ON_ONCE(!tries)) + return -ETIMEDOUT; + + if (!write) { + *data = readl(syscfg->base + SYS_CFGDATA); + dev_dbg(syscfg->dev, "func %p, read data %x\n", func, *data); } - dev = device_create(vexpress_config_class, parent, 0, - NULL, "%s.bridge", dev_name(parent)); + return 0; +} + +static int vexpress_syscfg_read(void *context, unsigned int index, + unsigned int *val) +{ + struct vexpress_syscfg_func *func = context; + + return vexpress_syscfg_exec(func, index, false, val); +} + +static int vexpress_syscfg_write(void *context, unsigned int index, + unsigned int val) +{ + struct vexpress_syscfg_func *func = context; + + return vexpress_syscfg_exec(func, index, true, &val); +} + +static struct regmap_config vexpress_syscfg_regmap_config = { + .lock = vexpress_config_lock, + .unlock = vexpress_config_unlock, + .reg_bits = 32, + .val_bits = 32, + .reg_read = vexpress_syscfg_read, + .reg_write = vexpress_syscfg_write, + .reg_format_endian = REGMAP_ENDIAN_LITTLE, + .val_format_endian = REGMAP_ENDIAN_LITTLE, +}; + - if (IS_ERR(dev)) - return dev; +static struct regmap *vexpress_syscfg_regmap_init(struct device *dev, + void *context) +{ + int err; + struct vexpress_syscfg *syscfg = context; + struct vexpress_syscfg_func *func; + struct property *prop; + const __be32 *val = NULL; + __be32 energy_quirk[4]; + int num; + u32 site, position, dcc; + int i; + + err = vexpress_config_get_topo(dev->of_node, &site, + &position, &dcc); + if (err) + return ERR_PTR(err); + + prop = of_find_property(dev->of_node, + "arm,vexpress-sysreg,func", NULL); + if (!prop) + return ERR_PTR(-EINVAL); - bridge = devm_kmalloc(dev, sizeof(*bridge), GFP_KERNEL); - if (!bridge) { - put_device(dev); - device_unregister(dev); + num = prop->length / sizeof(u32) / 2; + val = prop->value; + + /* + * "arm,vexpress-energy" function used to be described + * by its first device only, now it requires both + */ + if (num == 1 && of_device_is_compatible(dev->of_node, + "arm,vexpress-energy")) { + num = 2; + energy_quirk[0] = *val; +
energy_quirk[2] = *val++; + energy_quirk[1] = *val; + energy_quirk[3] = cpu_to_be32(be32_to_cpup(val) + 1); + val = energy_quirk; + } + + func = kzalloc(struct_size(func, template, num), GFP_KERNEL); + if (!func) return ERR_PTR(-ENOMEM); + + func->syscfg = syscfg; + func->num_templates = num; + + for (i = 0; i < num; i++) { + u32 function, device; + + function = be32_to_cpup(val++); + device = be32_to_cpup(val++); + + dev_dbg(dev, "func %p: %u/%u/%u/%u/%u\n", + func, site, position, dcc, + function, device); + + func->template[i] = SYS_CFGCTRL_DCC(dcc); + func->template[i] |= SYS_CFGCTRL_SITE(site); + func->template[i] |= SYS_CFGCTRL_POSITION(position); + func->template[i] |= SYS_CFGCTRL_FUNC(function); + func->template[i] |= SYS_CFGCTRL_DEVICE(device); } - bridge->ops = ops; - bridge->context = context; - dev_set_drvdata(dev, bridge); + vexpress_syscfg_regmap_config.max_register = num - 1; - dev_dbg(parent, "Registered bridge '%s', parent node %p\n", - dev_name(dev), parent->of_node); + func->regmap = regmap_init(dev, NULL, func, + &vexpress_syscfg_regmap_config); - return dev; -} + if (IS_ERR(func->regmap)) { + void *err = func->regmap; + kfree(func); + return err; + } + + list_add(&func->list, &syscfg->funcs); -static int vexpress_config_node_match(struct device *dev, const void *data) + return func->regmap; +} + +static void vexpress_syscfg_regmap_exit(struct regmap *regmap, void *context) { - const struct device_node *node = data; + struct vexpress_syscfg *syscfg = context; + struct vexpress_syscfg_func *func, *tmp; - dev_dbg(dev, "Parent node %p, looking for %p\n", - dev->parent->of_node, node); + regmap_exit(regmap); - return dev->parent->of_node == node; + list_for_each_entry_safe(func, tmp, &syscfg->funcs, list) { + if (func->regmap == regmap) { + list_del(&func->list); + kfree(func); + break; + } + } } -static int vexpress_config_populate(struct device_node *node) +static struct vexpress_config_bridge_ops vexpress_syscfg_bridge_ops = { + .regmap_init = vexpress_syscfg_regmap_init, + .regmap_exit = vexpress_syscfg_regmap_exit, +}; + + +static int vexpress_syscfg_probe(struct platform_device *pdev) { - struct device_node *bridge; - struct device *parent; - int ret; + struct vexpress_syscfg *syscfg; + struct resource *res; + struct vexpress_config_bridge *bridge; + struct device_node *node; + int master; + u32 dt_hbi; + + syscfg = devm_kzalloc(&pdev->dev, sizeof(*syscfg), GFP_KERNEL); + if (!syscfg) + return -ENOMEM; + syscfg->dev = &pdev->dev; + INIT_LIST_HEAD(&syscfg->funcs); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + syscfg->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(syscfg->base)) + return PTR_ERR(syscfg->base); - bridge = of_parse_phandle(node, "arm,vexpress,config-bridge", 0); + bridge = devm_kmalloc(&pdev->dev, sizeof(*bridge), GFP_KERNEL); if (!bridge) - return -EINVAL; + return -ENOMEM; - parent = class_find_device(vexpress_config_class, NULL, bridge, - vexpress_config_node_match); - of_node_put(bridge); - if (WARN_ON(!parent)) - return -ENODEV; + bridge->ops = &vexpress_syscfg_bridge_ops; + bridge->context = syscfg; - ret = of_platform_populate(node, NULL, NULL, parent); + dev_set_drvdata(&pdev->dev, bridge); - put_device(parent);
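The regmap_exit path above has to unlink the entry being freed with list_del(&func->list), never the list head itself, or the funcs list is corrupted. A standalone sketch of the same list_for_each_entry_safe() deletion pattern, with hypothetical names (struct demo_item and demo_remove() are illustrative, not part of this patch):

#include <linux/list.h>
#include <linux/slab.h>

struct demo_item {
	struct list_head list;
	int id;
};

/* Delete (and free) the first item whose id matches. */
static void demo_remove(struct list_head *items, int id)
{
	struct demo_item *item, *tmp;

	list_for_each_entry_safe(item, tmp, items, list) {
		if (item->id == id) {
			list_del(&item->list);	/* unlink this entry only */
			kfree(item);
			break;
		}
	}
}

+ master = readl(syscfg->base + SYS_MISC) & SYS_MISC_MASTERSITE ?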
+ VEXPRESS_SITE_DB2 : VEXPRESS_SITE_DB1; + vexpress_config_set_master(master); - return ret; -} + /* Confirm board type against DT property, if available */ + if (of_property_read_u32(of_root, "arm,hbi", &dt_hbi) == 0) { + u32 id = readl(syscfg->base + (master == VEXPRESS_SITE_DB1 ? + SYS_PROCID0 : SYS_PROCID1)); + u32 hbi = (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK; -static int __init vexpress_config_init(void) -{ - int err = 0; - struct device_node *node; + if (WARN_ON(dt_hbi != hbi)) + dev_warn(&pdev->dev, "DT HBI (%x) does not match hardware (%x)!\n", + dt_hbi, hbi); + } - /* Need the config devices early, before the "normal" devices... */ for_each_compatible_node(node, NULL, "arm,vexpress,config-bus") { - err = vexpress_config_populate(node); - if (err) { - of_node_put(node); - break; - } + struct device_node *bridge_np; + + bridge_np = of_parse_phandle(node, "arm,vexpress,config-bridge", 0); + of_node_put(bridge_np); + if (bridge_np != pdev->dev.parent->of_node) + continue; + + of_platform_populate(node, NULL, NULL, &pdev->dev); } - return err; + return 0; } -postcore_initcall(vexpress_config_init); +static const struct platform_device_id vexpress_syscfg_id_table[] = { + { "vexpress-syscfg", }, + {}, +}; +MODULE_DEVICE_TABLE(platform, vexpress_syscfg_id_table); + +static struct platform_driver vexpress_syscfg_driver = { + .driver.name = "vexpress-syscfg", + .id_table = vexpress_syscfg_id_table, + .probe = vexpress_syscfg_probe, +}; +module_platform_driver(vexpress_syscfg_driver); +MODULE_LICENSE("GPL v2");
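For reference, a hypothetical consumer of the bridge registered above (the compatible string, driver and function names are illustrative, not part of this patch): a child device populated on the "arm,vexpress,config-bus" obtains its regmap through devm_regmap_init_vexpress_config() and reads register index 0, which maps to the first function/device template built in vexpress_syscfg_regmap_init():

#include <linux/err.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/vexpress.h>

static int demo_probe(struct platform_device *pdev)
{
	struct regmap *map;
	unsigned int val;
	int err;

	/* The parent device must carry the vexpress config bridge data */
	map = devm_regmap_init_vexpress_config(&pdev->dev);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* The register index selects one of the per-function templates */
	err = regmap_read(map, 0, &val);
	if (err)
		return err;

	dev_info(&pdev->dev, "config function value: %u\n", val);
	return 0;
}

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "arm,vexpress-demo" },	/* hypothetical */
	{}
};
MODULE_DEVICE_TABLE(of, demo_of_match);

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.driver = {
		.name = "vexpress-demo",
		.of_match_table = demo_of_match,
	},
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL v2");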