author     Linus Torvalds <torvalds@linux-foundation.org>  2020-12-16 16:38:41 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-12-16 16:38:41 -0800
commit     48c1c40ab40cb087b992e7b77518c3a2926743cc (patch)
tree       8e5fcd8f0e45f6e05e08c2c8307417f17341768f /drivers
parent     9805529ec544ea7a82d891d5239a8ebd3dbb2a3e (diff)
parent     1dcdee6ee8f8fdfef5932699129d442d2f1a064d (diff)
Merge tag 'arm-soc-drivers-5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc
Pull ARM SoC driver updates from Arnd Bergmann:
 "There are a couple of subsystems maintained by other people that
  merge their drivers through the SoC tree, those changes include:

   - The SCMI firmware framework gains support for sensor notifications
     and for controlling voltage domains.

   - A large update for the Tegra memory controller driver, integrating
     it better with the interconnect framework

   - The memory controller subsystem gains support for Mediatek MT8192

   - The reset controller framework gains support for sharing pulsed
     resets

  For SoC specific drivers in drivers/soc, the main changes are

   - The Allwinner/sunxi MBUS gets a rework for the way it handles
     dma_map_ops and offsets between physical and dma address spaces.

   - An errata fix plus some cleanups for Freescale Layerscape SoCs

   - A cleanup for renesas drivers regarding MMIO accesses.

   - New SoC specific drivers for Mediatek MT8192 and MT8183 power
     domains

   - New SoC specific drivers for Aspeed AST2600 LPC bus control and
     SoC identification.

   - Core Power Domain support for Qualcomm MSM8916, MSM8939, SDM660
     and SDX55.

   - A rework of the TI AM33xx 'genpd' power domain support to use
     information from DT instead of platform data

   - Support for TI AM64x SoCs

   - Allow building some Amlogic drivers as modules instead of built-in

  Finally, there are numerous cleanups and smaller bug fixes for
  Mediatek, Tegra, Samsung, Qualcomm, TI OMAP, Amlogic, Rockchips,
  Renesas, and Xilinx SoCs"

* tag 'arm-soc-drivers-5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (222 commits)
  soc: mediatek: mmsys: Specify HAS_IOMEM dependency for MTK_MMSYS
  firmware: xilinx: Properly align function parameter
  firmware: xilinx: Add a blank line after function declaration
  firmware: xilinx: Remove additional newline
  firmware: xilinx: Fix kernel-doc warnings
  firmware: xlnx-zynqmp: fix compilation warning
  soc: xilinx: vcu: add missing register NUM_CORE
  soc: xilinx: vcu: use vcu-settings syscon registers
  dt-bindings: soc: xlnx: extract xlnx,vcu-settings to separate binding
  soc: xilinx: vcu: drop useless success message
  clk: samsung: mark PM functions as __maybe_unused
  soc: samsung: exynos-chipid: initialize later - with arch_initcall
  soc: samsung: exynos-chipid: order list of SoCs by name
  memory: jz4780_nemc: Fix potential NULL dereference in jz4780_nemc_probe()
  memory: ti-emif-sram: only build for ARMv7
  memory: tegra30: Support interconnect framework
  memory: tegra20: Support hardware versioning and clean up OPP table initialization
  dt-bindings: memory: tegra20-emc: Document opp-supported-hw property
  soc: rockchip: io-domain: Fix error return code in rockchip_iodomain_probe()
  reset-controller: ti: force the write operation when assert or deassert
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/bus/ti-sysc.c | 41
-rw-r--r--  drivers/clk/samsung/Kconfig | 10
-rw-r--r--  drivers/clk/samsung/Makefile | 2
-rw-r--r--  drivers/clk/samsung/clk-exynos-clkout.c | 199
-rw-r--r--  drivers/clk/ti/clk-33xx.c | 2
-rw-r--r--  drivers/cpuidle/cpuidle-big_little.c | 3
-rw-r--r--  drivers/firmware/arm_scmi/notify.c | 10
-rw-r--r--  drivers/firmware/arm_scmi/sensors.c | 720
-rw-r--r--  drivers/firmware/imx/imx-dsp.c | 72
-rw-r--r--  drivers/firmware/imx/scu-pd.c | 12
-rw-r--r--  drivers/firmware/meson/Kconfig | 5
-rw-r--r--  drivers/firmware/meson/meson_sm.c | 1
-rw-r--r--  drivers/firmware/tegra/bpmp-debugfs.c | 6
-rw-r--r--  drivers/firmware/ti_sci.c | 213
-rw-r--r--  drivers/firmware/ti_sci.h | 72
-rw-r--r--  drivers/firmware/xilinx/zynqmp.c | 46
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h | 34
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_backend.c | 19
-rw-r--r--  drivers/hwmon/scmi-hwmon.c | 2
-rw-r--r--  drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c | 27
-rw-r--r--  drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c | 17
-rw-r--r--  drivers/media/platform/sunxi/sun8i-di/sun8i-di.c | 4
-rw-r--r--  drivers/memory/Kconfig | 6
-rw-r--r--  drivers/memory/jz4780-nemc.c | 6
-rw-r--r--  drivers/memory/mtk-smi.c | 19
-rw-r--r--  drivers/memory/renesas-rpc-if.c | 18
-rw-r--r--  drivers/memory/tegra/Kconfig | 10
-rw-r--r--  drivers/memory/tegra/mc.c | 155
-rw-r--r--  drivers/memory/tegra/mc.h | 22
-rw-r--r--  drivers/memory/tegra/tegra114.c | 6
-rw-r--r--  drivers/memory/tegra/tegra124-emc.c | 22
-rw-r--r--  drivers/memory/tegra/tegra124.c | 6
-rw-r--r--  drivers/memory/tegra/tegra20-emc.c | 520
-rw-r--r--  drivers/memory/tegra/tegra20.c | 77
-rw-r--r--  drivers/memory/tegra/tegra210-emc-core.c | 39
-rw-r--r--  drivers/memory/tegra/tegra210.c | 60
-rw-r--r--  drivers/memory/tegra/tegra30-emc.c | 411
-rw-r--r--  drivers/memory/tegra/tegra30.c | 245
-rw-r--r--  drivers/pcmcia/Kconfig | 1
-rw-r--r--  drivers/pcmcia/at91_cf.c | 50
-rw-r--r--  drivers/remoteproc/wkup_m3_rproc.c | 41
-rw-r--r--  drivers/reset/Kconfig | 3
-rw-r--r--  drivers/reset/core.c | 73
-rw-r--r--  drivers/reset/reset-meson.c | 8
-rw-r--r--  drivers/reset/reset-socfpga.c | 11
-rw-r--r--  drivers/reset/reset-ti-syscon.c | 4
-rw-r--r--  drivers/soc/Makefile | 2
-rw-r--r--  drivers/soc/amlogic/Kconfig | 12
-rw-r--r--  drivers/soc/amlogic/meson-canvas.c | 4
-rw-r--r--  drivers/soc/amlogic/meson-clk-measure.c | 5
-rw-r--r--  drivers/soc/amlogic/meson-ee-pwrc.c | 8
-rw-r--r--  drivers/soc/amlogic/meson-gx-pwrc-vpu.c | 7
-rw-r--r--  drivers/soc/amlogic/meson-secure-pwrc.c | 5
-rw-r--r--  drivers/soc/aspeed/Kconfig | 47
-rw-r--r--  drivers/soc/aspeed/Makefile | 1
-rw-r--r--  drivers/soc/aspeed/aspeed-lpc-ctrl.c | 37
-rw-r--r--  drivers/soc/aspeed/aspeed-lpc-snoop.c | 2
-rw-r--r--  drivers/soc/aspeed/aspeed-socinfo.c | 135
-rw-r--r--  drivers/soc/atmel/soc.c | 6
-rw-r--r--  drivers/soc/atmel/soc.h | 3
-rw-r--r--  drivers/soc/bcm/brcmstb/pm/pm-arm.c | 2
-rw-r--r--  drivers/soc/fsl/dpio/qbman-portal.c | 18
-rw-r--r--  drivers/soc/fsl/qbman/qman.c | 8
-rw-r--r--  drivers/soc/fsl/qe/qe_common.c | 2
-rw-r--r--  drivers/soc/fsl/rcpm.c | 35
-rw-r--r--  drivers/soc/mediatek/Kconfig | 22
-rw-r--r--  drivers/soc/mediatek/Makefile | 2
-rw-r--r--  drivers/soc/mediatek/mt8173-pm-domains.h | 94
-rw-r--r--  drivers/soc/mediatek/mt8183-pm-domains.h | 221
-rw-r--r--  drivers/soc/mediatek/mt8192-pm-domains.h | 292
-rw-r--r--  drivers/soc/mediatek/mtk-cmdq-helper.c | 41
-rw-r--r--  drivers/soc/mediatek/mtk-devapc.c | 308
-rw-r--r--  drivers/soc/mediatek/mtk-infracfg.c | 5
-rw-r--r--  drivers/soc/mediatek/mtk-mmsys.c | 11
-rw-r--r--  drivers/soc/mediatek/mtk-pm-domains.c | 614
-rw-r--r--  drivers/soc/mediatek/mtk-pm-domains.h | 102
-rw-r--r--  drivers/soc/mediatek/mtk-scpsys.c | 5
-rw-r--r--  drivers/soc/qcom/Kconfig | 5
-rw-r--r--  drivers/soc/qcom/cmd-db.c | 8
-rw-r--r--  drivers/soc/qcom/kryo-l2-accessors.c | 2
-rw-r--r--  drivers/soc/qcom/llcc-qcom.c | 147
-rw-r--r--  drivers/soc/qcom/pdr_interface.c | 8
-rw-r--r--  drivers/soc/qcom/qcom-geni-se.c | 9
-rw-r--r--  drivers/soc/qcom/qcom_aoss.c | 4
-rw-r--r--  drivers/soc/qcom/rpmh-rsc.c | 7
-rw-r--r--  drivers/soc/qcom/rpmh.c | 14
-rw-r--r--  drivers/soc/qcom/rpmhpd.c | 16
-rw-r--r--  drivers/soc/qcom/rpmpd.c | 85
-rw-r--r--  drivers/soc/qcom/smem.c | 3
-rw-r--r--  drivers/soc/qcom/smp2p.c | 6
-rw-r--r--  drivers/soc/qcom/smsm.c | 4
-rw-r--r--  drivers/soc/qcom/socinfo.c | 6
-rw-r--r--  drivers/soc/qcom/wcnss_ctrl.c | 8
-rw-r--r--  drivers/soc/renesas/rmobile-sysc.c | 17
-rw-r--r--  drivers/soc/rockchip/io-domain.c | 4
-rw-r--r--  drivers/soc/samsung/exynos-chipid.c | 11
-rw-r--r--  drivers/soc/samsung/exynos-pmu.c | 11
-rw-r--r--  drivers/soc/samsung/exynos5422-asv.c | 2
-rw-r--r--  drivers/soc/samsung/s3c-pm-check.c | 2
-rw-r--r--  drivers/soc/sunxi/Kconfig | 8
-rw-r--r--  drivers/soc/sunxi/Makefile | 1
-rw-r--r--  drivers/soc/sunxi/sunxi_mbus.c | 132
-rw-r--r--  drivers/soc/tegra/fuse/speedo-tegra124.c | 21
-rw-r--r--  drivers/soc/tegra/fuse/speedo-tegra210.c | 8
-rw-r--r--  drivers/soc/ti/Kconfig | 18
-rw-r--r--  drivers/soc/ti/k3-ringacc.c | 98
-rw-r--r--  drivers/soc/ti/k3-socinfo.c | 1
-rw-r--r--  drivers/soc/ti/knav_dma.c | 15
-rw-r--r--  drivers/soc/ti/knav_qmss_queue.c | 66
-rw-r--r--  drivers/soc/ti/omap_prm.c | 84
-rw-r--r--  drivers/soc/ti/pm33xx.c | 21
-rw-r--r--  drivers/soc/ti/pruss.c | 6
-rw-r--r--  drivers/soc/ti/ti_sci_inta_msi.c | 12
-rw-r--r--  drivers/soc/ti/wkup_m3_ipc.c | 8
-rw-r--r--  drivers/soc/xilinx/Kconfig | 1
-rw-r--r--  drivers/soc/xilinx/xlnx_vcu.c | 96
-rw-r--r--  drivers/staging/media/sunxi/cedrus/cedrus.c | 1
-rw-r--r--  drivers/staging/media/sunxi/cedrus/cedrus.h | 3
-rw-r--r--  drivers/staging/media/sunxi/cedrus/cedrus_hw.c | 18
-rw-r--r--  drivers/tee/optee/device.c | 2
121 files changed, 5252 insertions, 1136 deletions
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 92ecf1a78ec7..a27d751cf219 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -853,8 +853,12 @@ static int sysc_ioremap(struct sysc *ddata)
*/
static int sysc_map_and_check_registers(struct sysc *ddata)
{
+ struct device_node *np = ddata->dev->of_node;
int error;
+ if (!of_get_property(np, "reg", NULL))
+ return 0;
+
error = sysc_parse_and_check_child_range(ddata);
if (error)
return error;
@@ -1222,10 +1226,10 @@ static int __maybe_unused sysc_runtime_suspend(struct device *dev)
ddata->enabled = false;
err_allow_idle:
- reset_control_assert(ddata->rsts);
-
sysc_clkdm_allow_idle(ddata);
+ reset_control_assert(ddata->rsts);
+
return error;
}
@@ -1379,6 +1383,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_CLKDM_NOAUTO),
SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
SYSC_QUIRK_CLKDM_NOAUTO),
+ SYSC_QUIRK("gpmc", 0, 0, 0x10, 0x14, 0x00000060, 0xffffffff,
+ SYSC_QUIRK_GPMC_DEBUG),
SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff,
SYSC_QUIRK_OPT_CLKS_NEEDED),
SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
@@ -1814,6 +1820,14 @@ static void sysc_init_module_quirks(struct sysc *ddata)
return;
}
+#ifdef CONFIG_OMAP_GPMC_DEBUG
+ if (ddata->cfg.quirks & SYSC_QUIRK_GPMC_DEBUG) {
+ ddata->cfg.quirks |= SYSC_QUIRK_NO_RESET_ON_INIT;
+
+ return;
+ }
+#endif
+
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_I2C) {
ddata->pre_reset_quirk = sysc_pre_reset_quirk_i2c;
ddata->post_reset_quirk = sysc_post_reset_quirk_i2c;
@@ -1945,6 +1959,7 @@ static int sysc_reset(struct sysc *ddata)
*/
static int sysc_init_module(struct sysc *ddata)
{
+ bool rstctrl_deasserted = false;
int error = 0;
error = sysc_clockdomain_init(ddata);
@@ -1969,6 +1984,7 @@ static int sysc_init_module(struct sysc *ddata)
error = reset_control_deassert(ddata->rsts);
if (error)
goto err_main_clocks;
+ rstctrl_deasserted = true;
}
ddata->revision = sysc_read_revision(ddata);
@@ -1978,13 +1994,13 @@ static int sysc_init_module(struct sysc *ddata)
if (ddata->legacy_mode) {
error = sysc_legacy_init(ddata);
if (error)
- goto err_reset;
+ goto err_main_clocks;
}
if (!ddata->legacy_mode) {
error = sysc_enable_module(ddata->dev);
if (error)
- goto err_reset;
+ goto err_main_clocks;
}
error = sysc_reset(ddata);
@@ -1994,10 +2010,6 @@ static int sysc_init_module(struct sysc *ddata)
if (error && !ddata->legacy_mode)
sysc_disable_module(ddata->dev);
-err_reset:
- if (error && !(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
- reset_control_assert(ddata->rsts);
-
err_main_clocks:
if (error)
sysc_disable_main_clocks(ddata);
@@ -2008,6 +2020,10 @@ err_opt_clocks:
sysc_clkdm_allow_idle(ddata);
}
+ if (error && rstctrl_deasserted &&
+ !(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
+ reset_control_assert(ddata->rsts);
+
return error;
}
@@ -2909,6 +2925,9 @@ static int sysc_probe(struct platform_device *pdev)
if (!ddata)
return -ENOMEM;
+ ddata->offsets[SYSC_REVISION] = -ENODEV;
+ ddata->offsets[SYSC_SYSCONFIG] = -ENODEV;
+ ddata->offsets[SYSC_SYSSTATUS] = -ENODEV;
ddata->dev = &pdev->dev;
platform_set_drvdata(pdev, ddata);
@@ -2975,9 +2994,6 @@ static int sysc_probe(struct platform_device *pdev)
}
/* Balance use counts as PM runtime should have enabled these all */
- if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
- reset_control_assert(ddata->rsts);
-
if (!(ddata->cfg.quirks &
(SYSC_QUIRK_NO_IDLE | SYSC_QUIRK_NO_IDLE_ON_INIT))) {
sysc_disable_main_clocks(ddata);
@@ -2985,6 +3001,9 @@ static int sysc_probe(struct platform_device *pdev)
sysc_clkdm_allow_idle(ddata);
}
+ if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
+ reset_control_assert(ddata->rsts);
+
sysc_show_registers(ddata);
ddata->dev->type = &sysc_device_type;
diff --git a/drivers/clk/samsung/Kconfig b/drivers/clk/samsung/Kconfig
index 57d4b3f20417..7e9c186e57ef 100644
--- a/drivers/clk/samsung/Kconfig
+++ b/drivers/clk/samsung/Kconfig
@@ -19,6 +19,16 @@ config EXYNOS_AUDSS_CLK_CON
on some Exynos SoC variants. Choose M or Y here if you want to
use audio devices such as I2S, PCM, etc.
+config EXYNOS_CLKOUT
+ tristate "Samsung Exynos clock output driver"
+ depends on COMMON_CLK_SAMSUNG
+ default y if ARCH_EXYNOS
+ help
+ Support for the clock output (XCLKOUT) present on some of Exynos SoC
+ variants. Usually the XCLKOUT is used to monitor the status of the
+ certains clocks from SoC, but it could also be tied to other devices
+ as an input clock.
+
# For S3C24XX platforms, select following symbols:
config S3C2410_COMMON_CLK
bool "Samsung S3C2410 clock controller support" if COMPILE_TEST
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 1a4e6b787978..6891b087acff 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -15,7 +15,7 @@ obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5420.o
obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5-subcmu.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos5433.o
obj-$(CONFIG_EXYNOS_AUDSS_CLK_CON) += clk-exynos-audss.o
-obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-clkout.o
+obj-$(CONFIG_EXYNOS_CLKOUT) += clk-exynos-clkout.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o
obj-$(CONFIG_S3C2410_COMMON_CLK)+= clk-s3c2410.o
obj-$(CONFIG_S3C2410_COMMON_DCLK)+= clk-s3c2410-dclk.o
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 34ccb1d23bc3..e6d6cbf8c4e6 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -9,10 +9,13 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/syscore_ops.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
#define EXYNOS_CLKOUT_NR_CLKS 1
#define EXYNOS_CLKOUT_PARENTS 32
@@ -28,41 +31,103 @@ struct exynos_clkout {
struct clk_mux mux;
spinlock_t slock;
void __iomem *reg;
+ struct device_node *np;
u32 pmu_debug_save;
struct clk_hw_onecell_data data;
};
-static struct exynos_clkout *clkout;
+struct exynos_clkout_variant {
+ u32 mux_mask;
+};
-static int exynos_clkout_suspend(void)
-{
- clkout->pmu_debug_save = readl(clkout->reg + EXYNOS_PMU_DEBUG_REG);
+static const struct exynos_clkout_variant exynos_clkout_exynos4 = {
+ .mux_mask = EXYNOS4_CLKOUT_MUX_MASK,
+};
- return 0;
-}
+static const struct exynos_clkout_variant exynos_clkout_exynos5 = {
+ .mux_mask = EXYNOS5_CLKOUT_MUX_MASK,
+};
-static void exynos_clkout_resume(void)
+static const struct of_device_id exynos_clkout_ids[] = {
+ {
+ .compatible = "samsung,exynos3250-pmu",
+ .data = &exynos_clkout_exynos4,
+ }, {
+ .compatible = "samsung,exynos4210-pmu",
+ .data = &exynos_clkout_exynos4,
+ }, {
+ .compatible = "samsung,exynos4412-pmu",
+ .data = &exynos_clkout_exynos4,
+ }, {
+ .compatible = "samsung,exynos5250-pmu",
+ .data = &exynos_clkout_exynos5,
+ }, {
+ .compatible = "samsung,exynos5410-pmu",
+ .data = &exynos_clkout_exynos5,
+ }, {
+ .compatible = "samsung,exynos5420-pmu",
+ .data = &exynos_clkout_exynos5,
+ }, {
+ .compatible = "samsung,exynos5433-pmu",
+ .data = &exynos_clkout_exynos5,
+ }, { }
+};
+MODULE_DEVICE_TABLE(of, exynos_clkout_ids);
+
+/*
+ * Device will be instantiated as child of PMU device without its own
+ * device node. Therefore match compatibles against parent.
+ */
+static int exynos_clkout_match_parent_dev(struct device *dev, u32 *mux_mask)
{
- writel(clkout->pmu_debug_save, clkout->reg + EXYNOS_PMU_DEBUG_REG);
-}
+ const struct exynos_clkout_variant *variant;
+ const struct of_device_id *match;
-static struct syscore_ops exynos_clkout_syscore_ops = {
- .suspend = exynos_clkout_suspend,
- .resume = exynos_clkout_resume,
-};
+ if (!dev->parent) {
+ dev_err(dev, "not instantiated from MFD\n");
+ return -EINVAL;
+ }
+
+ match = of_match_device(exynos_clkout_ids, dev->parent);
+ if (!match) {
+ dev_err(dev, "cannot match parent device\n");
+ return -EINVAL;
+ }
+ variant = match->data;
-static void __init exynos_clkout_init(struct device_node *node, u32 mux_mask)
+ *mux_mask = variant->mux_mask;
+
+ return 0;
+}
+
+static int exynos_clkout_probe(struct platform_device *pdev)
{
const char *parent_names[EXYNOS_CLKOUT_PARENTS];
struct clk *parents[EXYNOS_CLKOUT_PARENTS];
- int parent_count;
- int ret;
- int i;
+ struct exynos_clkout *clkout;
+ int parent_count, ret, i;
+ u32 mux_mask;
- clkout = kzalloc(struct_size(clkout, data.hws, EXYNOS_CLKOUT_NR_CLKS),
- GFP_KERNEL);
+ clkout = devm_kzalloc(&pdev->dev,
+ struct_size(clkout, data.hws, EXYNOS_CLKOUT_NR_CLKS),
+ GFP_KERNEL);
if (!clkout)
- return;
+ return -ENOMEM;
+
+ ret = exynos_clkout_match_parent_dev(&pdev->dev, &mux_mask);
+ if (ret)
+ return ret;
+
+ clkout->np = pdev->dev.of_node;
+ if (!clkout->np) {
+ /*
+ * pdev->dev.parent was checked by exynos_clkout_match_parent_dev()
+ * so it is not NULL.
+ */
+ clkout->np = pdev->dev.parent->of_node;
+ }
+
+ platform_set_drvdata(pdev, clkout);
spin_lock_init(&clkout->slock);
@@ -71,7 +136,7 @@ static void __init exynos_clkout_init(struct device_node *node, u32 mux_mask)
char name[] = "clkoutXX";
snprintf(name, sizeof(name), "clkout%d", i);
- parents[i] = of_clk_get_by_name(node, name);
+ parents[i] = of_clk_get_by_name(clkout->np, name);
if (IS_ERR(parents[i])) {
parent_names[i] = "none";
continue;
@@ -82,11 +147,13 @@ static void __init exynos_clkout_init(struct device_node *node, u32 mux_mask)
}
if (!parent_count)
- goto free_clkout;
+ return -EINVAL;
- clkout->reg = of_iomap(node, 0);
- if (!clkout->reg)
+ clkout->reg = of_iomap(clkout->np, 0);
+ if (!clkout->reg) {
+ ret = -ENODEV;
goto clks_put;
+ }
clkout->gate.reg = clkout->reg + EXYNOS_PMU_DEBUG_REG;
clkout->gate.bit_idx = EXYNOS_CLKOUT_DISABLE_SHIFT;
@@ -103,17 +170,17 @@ static void __init exynos_clkout_init(struct device_node *node, u32 mux_mask)
&clk_mux_ops, NULL, NULL, &clkout->gate.hw,
&clk_gate_ops, CLK_SET_RATE_PARENT
| CLK_SET_RATE_NO_REPARENT);
- if (IS_ERR(clkout->data.hws[0]))
+ if (IS_ERR(clkout->data.hws[0])) {
+ ret = PTR_ERR(clkout->data.hws[0]);
goto err_unmap;
+ }
clkout->data.num = EXYNOS_CLKOUT_NR_CLKS;
- ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, &clkout->data);
+ ret = of_clk_add_hw_provider(clkout->np, of_clk_hw_onecell_get, &clkout->data);
if (ret)
goto err_clk_unreg;
- register_syscore_ops(&exynos_clkout_syscore_ops);
-
- return;
+ return 0;
err_clk_unreg:
clk_hw_unregister(clkout->data.hws[0]);
@@ -123,38 +190,56 @@ clks_put:
for (i = 0; i < EXYNOS_CLKOUT_PARENTS; ++i)
if (!IS_ERR(parents[i]))
clk_put(parents[i]);
-free_clkout:
- kfree(clkout);
- pr_err("%s: failed to register clkout clock\n", __func__);
+ dev_err(&pdev->dev, "failed to register clkout clock\n");
+
+ return ret;
}
-/*
- * We use CLK_OF_DECLARE_DRIVER initialization method to avoid setting
- * the OF_POPULATED flag on the pmu device tree node, so later the
- * Exynos PMU platform device can be properly probed with PMU driver.
- */
+static int exynos_clkout_remove(struct platform_device *pdev)
+{
+ struct exynos_clkout *clkout = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(clkout->np);
+ clk_hw_unregister(clkout->data.hws[0]);
+ iounmap(clkout->reg);
+
+ return 0;
+}
-static void __init exynos4_clkout_init(struct device_node *node)
+static int __maybe_unused exynos_clkout_suspend(struct device *dev)
{
- exynos_clkout_init(node, EXYNOS4_CLKOUT_MUX_MASK);
+ struct exynos_clkout *clkout = dev_get_drvdata(dev);
+
+ clkout->pmu_debug_save = readl(clkout->reg + EXYNOS_PMU_DEBUG_REG);
+
+ return 0;
}
-CLK_OF_DECLARE_DRIVER(exynos4210_clkout, "samsung,exynos4210-pmu",
- exynos4_clkout_init);
-CLK_OF_DECLARE_DRIVER(exynos4412_clkout, "samsung,exynos4412-pmu",
- exynos4_clkout_init);
-CLK_OF_DECLARE_DRIVER(exynos3250_clkout, "samsung,exynos3250-pmu",
- exynos4_clkout_init);
-
-static void __init exynos5_clkout_init(struct device_node *node)
+
+static int __maybe_unused exynos_clkout_resume(struct device *dev)
{
- exynos_clkout_init(node, EXYNOS5_CLKOUT_MUX_MASK);
+ struct exynos_clkout *clkout = dev_get_drvdata(dev);
+
+ writel(clkout->pmu_debug_save, clkout->reg + EXYNOS_PMU_DEBUG_REG);
+
+ return 0;
}
-CLK_OF_DECLARE_DRIVER(exynos5250_clkout, "samsung,exynos5250-pmu",
- exynos5_clkout_init);
-CLK_OF_DECLARE_DRIVER(exynos5410_clkout, "samsung,exynos5410-pmu",
- exynos5_clkout_init);
-CLK_OF_DECLARE_DRIVER(exynos5420_clkout, "samsung,exynos5420-pmu",
- exynos5_clkout_init);
-CLK_OF_DECLARE_DRIVER(exynos5433_clkout, "samsung,exynos5433-pmu",
- exynos5_clkout_init);
+
+static SIMPLE_DEV_PM_OPS(exynos_clkout_pm_ops, exynos_clkout_suspend,
+ exynos_clkout_resume);
+
+static struct platform_driver exynos_clkout_driver = {
+ .driver = {
+ .name = "exynos-clkout",
+ .of_match_table = exynos_clkout_ids,
+ .pm = &exynos_clkout_pm_ops,
+ },
+ .probe = exynos_clkout_probe,
+ .remove = exynos_clkout_remove,
+};
+module_platform_driver(exynos_clkout_driver);
+
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
+MODULE_AUTHOR("Tomasz Figa <tomasz.figa@gmail.com>");
+MODULE_DESCRIPTION("Samsung Exynos clock output driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
index 7dc30dd6c8d5..f2c22120aaa7 100644
--- a/drivers/clk/ti/clk-33xx.c
+++ b/drivers/clk/ti/clk-33xx.c
@@ -266,6 +266,8 @@ static const char *enable_init_clks[] = {
"dpll_ddr_m2_ck",
"dpll_mpu_m2_ck",
"l3_gclk",
+ /* AM3_L3_L3_MAIN_CLKCTRL, needed during suspend */
+ "l3-clkctrl:00bc:0",
"l4hs_gclk",
"l4fw_gclk",
"l4ls_gclk",
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
index 7f8ddc04342d..abe51185f243 100644
--- a/drivers/cpuidle/cpuidle-big_little.c
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -155,8 +155,7 @@ static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int part_id)
static const struct of_device_id compatible_machine_match[] = {
{ .compatible = "arm,vexpress,v2p-ca15_a7" },
- { .compatible = "samsung,exynos5420" },
- { .compatible = "samsung,exynos5800" },
+ { .compatible = "google,peach" },
{},
};
diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c
index ce336899d636..66196b293b6c 100644
--- a/drivers/firmware/arm_scmi/notify.c
+++ b/drivers/firmware/arm_scmi/notify.c
@@ -1474,17 +1474,17 @@ int scmi_notification_init(struct scmi_handle *handle)
ni->gid = gid;
ni->handle = handle;
+ ni->registered_protocols = devm_kcalloc(handle->dev, SCMI_MAX_PROTO,
+ sizeof(char *), GFP_KERNEL);
+ if (!ni->registered_protocols)
+ goto err;
+
ni->notify_wq = alloc_workqueue(dev_name(handle->dev),
WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
0);
if (!ni->notify_wq)
goto err;
- ni->registered_protocols = devm_kcalloc(handle->dev, SCMI_MAX_PROTO,
- sizeof(char *), GFP_KERNEL);
- if (!ni->registered_protocols)
- goto err;
-
mutex_init(&ni->pending_mtx);
hash_init(ni->pending_events_handlers);
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index b4232d611033..4541b891b733 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -2,21 +2,30 @@
/*
* System Control and Management Interface (SCMI) Sensor Protocol
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2020 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications SENSOR - " fmt
+#include <linux/bitfield.h>
#include <linux/scmi_protocol.h>
#include "common.h"
#include "notify.h"
+#define SCMI_MAX_NUM_SENSOR_AXIS 63
+#define SCMIv2_SENSOR_PROTOCOL 0x10000
+
enum scmi_sensor_protocol_cmd {
SENSOR_DESCRIPTION_GET = 0x3,
SENSOR_TRIP_POINT_NOTIFY = 0x4,
SENSOR_TRIP_POINT_CONFIG = 0x5,
SENSOR_READING_GET = 0x6,
+ SENSOR_AXIS_DESCRIPTION_GET = 0x7,
+ SENSOR_LIST_UPDATE_INTERVALS = 0x8,
+ SENSOR_CONFIG_GET = 0x9,
+ SENSOR_CONFIG_SET = 0xA,
+ SENSOR_CONTINUOUS_UPDATE_NOTIFY = 0xB,
};
struct scmi_msg_resp_sensor_attributes {
@@ -28,29 +37,106 @@ struct scmi_msg_resp_sensor_attributes {
__le32 reg_size;
};
+/* v3 attributes_low macros */
+#define SUPPORTS_UPDATE_NOTIFY(x) FIELD_GET(BIT(30), (x))
+#define SENSOR_TSTAMP_EXP(x) FIELD_GET(GENMASK(14, 10), (x))
+#define SUPPORTS_TIMESTAMP(x) FIELD_GET(BIT(9), (x))
+#define SUPPORTS_EXTEND_ATTRS(x) FIELD_GET(BIT(8), (x))
+
+/* v2 attributes_high macros */
+#define SENSOR_UPDATE_BASE(x) FIELD_GET(GENMASK(31, 27), (x))
+#define SENSOR_UPDATE_SCALE(x) FIELD_GET(GENMASK(26, 22), (x))
+
+/* v3 attributes_high macros */
+#define SENSOR_AXIS_NUMBER(x) FIELD_GET(GENMASK(21, 16), (x))
+#define SUPPORTS_AXIS(x) FIELD_GET(BIT(8), (x))
+
+/* v3 resolution macros */
+#define SENSOR_RES(x) FIELD_GET(GENMASK(26, 0), (x))
+#define SENSOR_RES_EXP(x) FIELD_GET(GENMASK(31, 27), (x))
+
+struct scmi_msg_resp_attrs {
+ __le32 min_range_low;
+ __le32 min_range_high;
+ __le32 max_range_low;
+ __le32 max_range_high;
+};
+
struct scmi_msg_resp_sensor_description {
__le16 num_returned;
__le16 num_remaining;
- struct {
+ struct scmi_sensor_descriptor {
__le32 id;
__le32 attributes_low;
-#define SUPPORTS_ASYNC_READ(x) ((x) & BIT(31))
-#define NUM_TRIP_POINTS(x) ((x) & 0xff)
+/* Common attributes_low macros */
+#define SUPPORTS_ASYNC_READ(x) FIELD_GET(BIT(31), (x))
+#define NUM_TRIP_POINTS(x) FIELD_GET(GENMASK(7, 0), (x))
__le32 attributes_high;
-#define SENSOR_TYPE(x) ((x) & 0xff)
-#define SENSOR_SCALE(x) (((x) >> 11) & 0x1f)
-#define SENSOR_SCALE_SIGN BIT(4)
-#define SENSOR_SCALE_EXTEND GENMASK(7, 5)
-#define SENSOR_UPDATE_SCALE(x) (((x) >> 22) & 0x1f)
-#define SENSOR_UPDATE_BASE(x) (((x) >> 27) & 0x1f)
- u8 name[SCMI_MAX_STR_SIZE];
- } desc[0];
+/* Common attributes_high macros */
+#define SENSOR_SCALE(x) FIELD_GET(GENMASK(15, 11), (x))
+#define SENSOR_SCALE_SIGN BIT(4)
+#define SENSOR_SCALE_EXTEND GENMASK(31, 5)
+#define SENSOR_TYPE(x) FIELD_GET(GENMASK(7, 0), (x))
+ u8 name[SCMI_MAX_STR_SIZE];
+ /* only for version > 2.0 */
+ __le32 power;
+ __le32 resolution;
+ struct scmi_msg_resp_attrs scalar_attrs;
+ } desc[];
};
-struct scmi_msg_sensor_trip_point_notify {
+/* Base scmi_sensor_descriptor size excluding extended attrs after name */
+#define SCMI_MSG_RESP_SENS_DESCR_BASE_SZ 28
+
+/* Sign extend to a full s32 */
+#define S32_EXT(v) \
+ ({ \
+ int __v = (v); \
+ \
+ if (__v & SENSOR_SCALE_SIGN) \
+ __v |= SENSOR_SCALE_EXTEND; \
+ __v; \
+ })
+
+struct scmi_msg_sensor_axis_description_get {
+ __le32 id;
+ __le32 axis_desc_index;
+};
+
+struct scmi_msg_resp_sensor_axis_description {
+ __le32 num_axis_flags;
+#define NUM_AXIS_RETURNED(x) FIELD_GET(GENMASK(5, 0), (x))
+#define NUM_AXIS_REMAINING(x) FIELD_GET(GENMASK(31, 26), (x))
+ struct scmi_axis_descriptor {
+ __le32 id;
+ __le32 attributes_low;
+ __le32 attributes_high;
+ u8 name[SCMI_MAX_STR_SIZE];
+ __le32 resolution;
+ struct scmi_msg_resp_attrs attrs;
+ } desc[];
+};
+
+/* Base scmi_axis_descriptor size excluding extended attrs after name */
+#define SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ 28
+
+struct scmi_msg_sensor_list_update_intervals {
+ __le32 id;
+ __le32 index;
+};
+
+struct scmi_msg_resp_sensor_list_update_intervals {
+ __le32 num_intervals_flags;
+#define NUM_INTERVALS_RETURNED(x) FIELD_GET(GENMASK(11, 0), (x))
+#define SEGMENTED_INTVL_FORMAT(x) FIELD_GET(BIT(12), (x))
+#define NUM_INTERVALS_REMAINING(x) FIELD_GET(GENMASK(31, 16), (x))
+ __le32 intervals[];
+};
+
+struct scmi_msg_sensor_request_notify {
__le32 id;
__le32 event_control;
-#define SENSOR_TP_NOTIFY_ALL BIT(0)
+#define SENSOR_NOTIFY_ALL BIT(0)
};
struct scmi_msg_set_sensor_trip_point {
@@ -66,18 +152,46 @@ struct scmi_msg_set_sensor_trip_point {
__le32 value_high;
};
+struct scmi_msg_sensor_config_set {
+ __le32 id;
+ __le32 sensor_config;
+};
+
struct scmi_msg_sensor_reading_get {
__le32 id;
__le32 flags;
#define SENSOR_READ_ASYNC BIT(0)
};
+struct scmi_resp_sensor_reading_complete {
+ __le32 id;
+ __le64 readings;
+};
+
+struct scmi_sensor_reading_resp {
+ __le32 sensor_value_low;
+ __le32 sensor_value_high;
+ __le32 timestamp_low;
+ __le32 timestamp_high;
+};
+
+struct scmi_resp_sensor_reading_complete_v3 {
+ __le32 id;
+ struct scmi_sensor_reading_resp readings[];
+};
+
struct scmi_sensor_trip_notify_payld {
__le32 agent_id;
__le32 sensor_id;
__le32 trip_point_desc;
};
+struct scmi_sensor_update_notify_payld {
+ __le32 agent_id;
+ __le32 sensor_id;
+ struct scmi_sensor_reading_resp readings[];
+};
+
struct sensors_info {
u32 version;
int num_sensors;
@@ -114,6 +228,194 @@ static int scmi_sensor_attributes_get(const struct scmi_handle *handle,
return ret;
}
+static inline void scmi_parse_range_attrs(struct scmi_range_attrs *out,
+ struct scmi_msg_resp_attrs *in)
+{
+ out->min_range = get_unaligned_le64((void *)&in->min_range_low);
+ out->max_range = get_unaligned_le64((void *)&in->max_range_low);
+}
+
+static int scmi_sensor_update_intervals(const struct scmi_handle *handle,
+ struct scmi_sensor_info *s)
+{
+ int ret, cnt;
+ u32 desc_index = 0;
+ u16 num_returned, num_remaining;
+ struct scmi_xfer *ti;
+ struct scmi_msg_resp_sensor_list_update_intervals *buf;
+ struct scmi_msg_sensor_list_update_intervals *msg;
+
+ ret = scmi_xfer_get_init(handle, SENSOR_LIST_UPDATE_INTERVALS,
+ SCMI_PROTOCOL_SENSOR, sizeof(*msg), 0, &ti);
+ if (ret)
+ return ret;
+
+ buf = ti->rx.buf;
+ do {
+ u32 flags;
+
+ msg = ti->tx.buf;
+ /* Set the number of sensors to be skipped/already read */
+ msg->id = cpu_to_le32(s->id);
+ msg->index = cpu_to_le32(desc_index);
+
+ ret = scmi_do_xfer(handle, ti);
+ if (ret)
+ break;
+
+ flags = le32_to_cpu(buf->num_intervals_flags);
+ num_returned = NUM_INTERVALS_RETURNED(flags);
+ num_remaining = NUM_INTERVALS_REMAINING(flags);
+
+ /*
+ * Max intervals is not declared previously anywhere so we
+ * assume it's returned+remaining.
+ */
+ if (!s->intervals.count) {
+ s->intervals.segmented = SEGMENTED_INTVL_FORMAT(flags);
+ s->intervals.count = num_returned + num_remaining;
+ /* segmented intervals are reported in one triplet */
+ if (s->intervals.segmented &&
+ (num_remaining || num_returned != 3)) {
+ dev_err(handle->dev,
+ "Sensor ID:%d advertises an invalid segmented interval (%d)\n",
+ s->id, s->intervals.count);
+ s->intervals.segmented = false;
+ s->intervals.count = 0;
+ ret = -EINVAL;
+ break;
+ }
+ /* Direct allocation when exceeding pre-allocated */
+ if (s->intervals.count >= SCMI_MAX_PREALLOC_POOL) {
+ s->intervals.desc =
+ devm_kcalloc(handle->dev,
+ s->intervals.count,
+ sizeof(*s->intervals.desc),
+ GFP_KERNEL);
+ if (!s->intervals.desc) {
+ s->intervals.segmented = false;
+ s->intervals.count = 0;
+ ret = -ENOMEM;
+ break;
+ }
+ }
+ } else if (desc_index + num_returned > s->intervals.count) {
+ dev_err(handle->dev,
+ "No. of update intervals can't exceed %d\n",
+ s->intervals.count);
+ ret = -EINVAL;
+ break;
+ }
+
+ for (cnt = 0; cnt < num_returned; cnt++)
+ s->intervals.desc[desc_index + cnt] =
+ le32_to_cpu(buf->intervals[cnt]);
+
+ desc_index += num_returned;
+
+ scmi_reset_rx_to_maxsz(handle, ti);
+ /*
+ * check for both returned and remaining to avoid infinite
+ * loop due to buggy firmware
+ */
+ } while (num_returned && num_remaining);
+
+ scmi_xfer_put(handle, ti);
+ return ret;
+}
+
+static int scmi_sensor_axis_description(const struct scmi_handle *handle,
+ struct scmi_sensor_info *s)
+{
+ int ret, cnt;
+ u32 desc_index = 0;
+ u16 num_returned, num_remaining;
+ struct scmi_xfer *te;
+ struct scmi_msg_resp_sensor_axis_description *buf;
+ struct scmi_msg_sensor_axis_description_get *msg;
+
+ s->axis = devm_kcalloc(handle->dev, s->num_axis,
+ sizeof(*s->axis), GFP_KERNEL);
+ if (!s->axis)
+ return -ENOMEM;
+
+ ret = scmi_xfer_get_init(handle, SENSOR_AXIS_DESCRIPTION_GET,
+ SCMI_PROTOCOL_SENSOR, sizeof(*msg), 0, &te);
+ if (ret)
+ return ret;
+
+ buf = te->rx.buf;
+ do {
+ u32 flags;
+ struct scmi_axis_descriptor *adesc;
+
+ msg = te->tx.buf;
+ /* Set the number of sensors to be skipped/already read */
+ msg->id = cpu_to_le32(s->id);
+ msg->axis_desc_index = cpu_to_le32(desc_index);
+
+ ret = scmi_do_xfer(handle, te);
+ if (ret)
+ break;
+
+ flags = le32_to_cpu(buf->num_axis_flags);
+ num_returned = NUM_AXIS_RETURNED(flags);
+ num_remaining = NUM_AXIS_REMAINING(flags);
+
+ if (desc_index + num_returned > s->num_axis) {
+ dev_err(handle->dev, "No. of axis can't exceed %d\n",
+ s->num_axis);
+ break;
+ }
+
+ adesc = &buf->desc[0];
+ for (cnt = 0; cnt < num_returned; cnt++) {
+ u32 attrh, attrl;
+ struct scmi_sensor_axis_info *a;
+ size_t dsize = SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ;
+
+ attrl = le32_to_cpu(adesc->attributes_low);
+
+ a = &s->axis[desc_index + cnt];
+
+ a->id = le32_to_cpu(adesc->id);
+ a->extended_attrs = SUPPORTS_EXTEND_ATTRS(attrl);
+
+ attrh = le32_to_cpu(adesc->attributes_high);
+ a->scale = S32_EXT(SENSOR_SCALE(attrh));
+ a->type = SENSOR_TYPE(attrh);
+ strlcpy(a->name, adesc->name, SCMI_MAX_STR_SIZE);
+
+ if (a->extended_attrs) {
+ unsigned int ares =
+ le32_to_cpu(adesc->resolution);
+
+ a->resolution = SENSOR_RES(ares);
+ a->exponent =
+ S32_EXT(SENSOR_RES_EXP(ares));
+ dsize += sizeof(adesc->resolution);
+
+ scmi_parse_range_attrs(&a->attrs,
+ &adesc->attrs);
+ dsize += sizeof(adesc->attrs);
+ }
+
+ adesc = (typeof(adesc))((u8 *)adesc + dsize);
+ }
+
+ desc_index += num_returned;
+
+ scmi_reset_rx_to_maxsz(handle, te);
+ /*
+ * check for both returned and remaining to avoid infinite
+ * loop due to buggy firmware
+ */
+ } while (num_returned && num_remaining);
+
+ scmi_xfer_put(handle, te);
+ return ret;
+}
+
static int scmi_sensor_description_get(const struct scmi_handle *handle,
struct sensors_info *si)
{
@@ -131,9 +433,10 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
buf = t->rx.buf;
do {
+ struct scmi_sensor_descriptor *sdesc;
+
/* Set the number of sensors to be skipped/already read */
put_unaligned_le32(desc_index, t->tx.buf);
-
ret = scmi_do_xfer(handle, t);
if (ret)
break;
@@ -147,22 +450,97 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
break;
}
+ sdesc = &buf->desc[0];
for (cnt = 0; cnt < num_returned; cnt++) {
u32 attrh, attrl;
struct scmi_sensor_info *s;
+ size_t dsize = SCMI_MSG_RESP_SENS_DESCR_BASE_SZ;
- attrl = le32_to_cpu(buf->desc[cnt].attributes_low);
- attrh = le32_to_cpu(buf->desc[cnt].attributes_high);
s = &si->sensors[desc_index + cnt];
- s->id = le32_to_cpu(buf->desc[cnt].id);
- s->type = SENSOR_TYPE(attrh);
- s->scale = SENSOR_SCALE(attrh);
- /* Sign extend to a full s8 */
- if (s->scale & SENSOR_SCALE_SIGN)
- s->scale |= SENSOR_SCALE_EXTEND;
+ s->id = le32_to_cpu(sdesc->id);
+
+ attrl = le32_to_cpu(sdesc->attributes_low);
+ /* common bitfields parsing */
s->async = SUPPORTS_ASYNC_READ(attrl);
s->num_trip_points = NUM_TRIP_POINTS(attrl);
- strlcpy(s->name, buf->desc[cnt].name, SCMI_MAX_STR_SIZE);
+ /**
+ * only SCMIv3.0 specific bitfield below.
+ * Such bitfields are assumed to be zeroed on non
+ * relevant fw versions...assuming fw not buggy !
+ */
+ s->update = SUPPORTS_UPDATE_NOTIFY(attrl);
+ s->timestamped = SUPPORTS_TIMESTAMP(attrl);
+ if (s->timestamped)
+ s->tstamp_scale =
+ S32_EXT(SENSOR_TSTAMP_EXP(attrl));
+ s->extended_scalar_attrs =
+ SUPPORTS_EXTEND_ATTRS(attrl);
+
+ attrh = le32_to_cpu(sdesc->attributes_high);
+ /* common bitfields parsing */
+ s->scale = S32_EXT(SENSOR_SCALE(attrh));
+ s->type = SENSOR_TYPE(attrh);
+ /* Use pre-allocated pool wherever possible */
+ s->intervals.desc = s->intervals.prealloc_pool;
+ if (si->version == SCMIv2_SENSOR_PROTOCOL) {
+ s->intervals.segmented = false;
+ s->intervals.count = 1;
+ /*
+ * Convert SCMIv2.0 update interval format to
+ * SCMIv3.0 to be used as the common exposed
+ * descriptor, accessible via common macros.
+ */
+ s->intervals.desc[0] =
+ (SENSOR_UPDATE_BASE(attrh) << 5) |
+ SENSOR_UPDATE_SCALE(attrh);
+ } else {
+ /*
+ * From SCMIv3.0 update intervals are retrieved
+ * via a dedicated (optional) command.
+ * Since the command is optional, on error carry
+ * on without any update interval.
+ */
+ if (scmi_sensor_update_intervals(handle, s))
+ dev_dbg(handle->dev,
+ "Update Intervals not available for sensor ID:%d\n",
+ s->id);
+ }
+ /**
+ * only > SCMIv2.0 specific bitfield below.
+ * Such bitfields are assumed to be zeroed on non
+ * relevant fw versions...assuming fw not buggy !
+ */
+ s->num_axis = min_t(unsigned int,
+ SUPPORTS_AXIS(attrh) ?
+ SENSOR_AXIS_NUMBER(attrh) : 0,
+ SCMI_MAX_NUM_SENSOR_AXIS);
+ strlcpy(s->name, sdesc->name, SCMI_MAX_STR_SIZE);
+
+ if (s->extended_scalar_attrs) {
+ s->sensor_power = le32_to_cpu(sdesc->power);
+ dsize += sizeof(sdesc->power);
+ /* Only for sensors reporting scalar values */
+ if (s->num_axis == 0) {
+ unsigned int sres =
+ le32_to_cpu(sdesc->resolution);
+
+ s->resolution = SENSOR_RES(sres);
+ s->exponent =
+ S32_EXT(SENSOR_RES_EXP(sres));
+ dsize += sizeof(sdesc->resolution);
+
+ scmi_parse_range_attrs(&s->scalar_attrs,
+ &sdesc->scalar_attrs);
+ dsize += sizeof(sdesc->scalar_attrs);
+ }
+ }
+ if (s->num_axis > 0) {
+ ret = scmi_sensor_axis_description(handle, s);
+ if (ret)
+ goto out;
+ }
+
+ sdesc = (typeof(sdesc))((u8 *)sdesc + dsize);
}
desc_index += num_returned;
@@ -174,19 +552,21 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
*/
} while (num_returned && num_remaining);
+out:
scmi_xfer_put(handle, t);
return ret;
}
-static int scmi_sensor_trip_point_notify(const struct scmi_handle *handle,
- u32 sensor_id, bool enable)
+static inline int
+scmi_sensor_request_notify(const struct scmi_handle *handle, u32 sensor_id,
+ u8 message_id, bool enable)
{
int ret;
- u32 evt_cntl = enable ? SENSOR_TP_NOTIFY_ALL : 0;
+ u32 evt_cntl = enable ? SENSOR_NOTIFY_ALL : 0;
struct scmi_xfer *t;
- struct scmi_msg_sensor_trip_point_notify *cfg;
+ struct scmi_msg_sensor_request_notify *cfg;
- ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_NOTIFY,
+ ret = scmi_xfer_get_init(handle, message_id,
SCMI_PROTOCOL_SENSOR, sizeof(*cfg), 0, &t);
if (ret)
return ret;
@@ -201,6 +581,23 @@ static int scmi_sensor_trip_point_notify(const struct scmi_handle *handle,
return ret;
}
+static int scmi_sensor_trip_point_notify(const struct scmi_handle *handle,
+ u32 sensor_id, bool enable)
+{
+ return scmi_sensor_request_notify(handle, sensor_id,
+ SENSOR_TRIP_POINT_NOTIFY,
+ enable);
+}
+
+static int
+scmi_sensor_continuous_update_notify(const struct scmi_handle *handle,
+ u32 sensor_id, bool enable)
+{
+ return scmi_sensor_request_notify(handle, sensor_id,
+ SENSOR_CONTINUOUS_UPDATE_NOTIFY,
+ enable);
+}
+
static int
scmi_sensor_trip_point_config(const struct scmi_handle *handle, u32 sensor_id,
u8 trip_id, u64 trip_value)
@@ -227,6 +624,75 @@ scmi_sensor_trip_point_config(const struct scmi_handle *handle, u32 sensor_id,
return ret;
}
+static int scmi_sensor_config_get(const struct scmi_handle *handle,
+ u32 sensor_id, u32 *sensor_config)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = scmi_xfer_get_init(handle, SENSOR_CONFIG_GET,
+ SCMI_PROTOCOL_SENSOR, sizeof(__le32),
+ sizeof(__le32), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(cpu_to_le32(sensor_id), t->tx.buf);
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ struct sensors_info *si = handle->sensor_priv;
+ struct scmi_sensor_info *s = si->sensors + sensor_id;
+
+ *sensor_config = get_unaligned_le64(t->rx.buf);
+ s->sensor_config = *sensor_config;
+ }
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_sensor_config_set(const struct scmi_handle *handle,
+ u32 sensor_id, u32 sensor_config)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_sensor_config_set *msg;
+
+ ret = scmi_xfer_get_init(handle, SENSOR_CONFIG_SET,
+ SCMI_PROTOCOL_SENSOR, sizeof(*msg), 0, &t);
+ if (ret)
+ return ret;
+
+ msg = t->tx.buf;
+ msg->id = cpu_to_le32(sensor_id);
+ msg->sensor_config = cpu_to_le32(sensor_config);
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ struct sensors_info *si = handle->sensor_priv;
+ struct scmi_sensor_info *s = si->sensors + sensor_id;
+
+ s->sensor_config = sensor_config;
+ }
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+/**
+ * scmi_sensor_reading_get - Read scalar sensor value
+ * @handle: Platform handle
+ * @sensor_id: Sensor ID
+ * @value: The 64bit value sensor reading
+ *
+ * This function returns a single 64 bit reading value representing the sensor
+ * value; if the platform SCMI Protocol implementation and the sensor support
+ * multiple axis and timestamped-reads, this just returns the first axis while
+ * dropping the timestamp value.
+ * Use instead the @scmi_sensor_reading_get_timestamped to retrieve the array of
+ * timestamped multi-axis values.
+ *
+ * Return: 0 on Success
+ */
static int scmi_sensor_reading_get(const struct scmi_handle *handle,
u32 sensor_id, u64 *value)
{
@@ -237,20 +703,24 @@ static int scmi_sensor_reading_get(const struct scmi_handle *handle,
struct scmi_sensor_info *s = si->sensors + sensor_id;
ret = scmi_xfer_get_init(handle, SENSOR_READING_GET,
- SCMI_PROTOCOL_SENSOR, sizeof(*sensor),
- sizeof(u64), &t);
+ SCMI_PROTOCOL_SENSOR, sizeof(*sensor), 0, &t);
if (ret)
return ret;
sensor = t->tx.buf;
sensor->id = cpu_to_le32(sensor_id);
-
if (s->async) {
sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
ret = scmi_do_xfer_with_response(handle, t);
- if (!ret)
- *value = get_unaligned_le64((void *)
- ((__le32 *)t->rx.buf + 1));
+ if (!ret) {
+ struct scmi_resp_sensor_reading_complete *resp;
+
+ resp = t->rx.buf;
+ if (le32_to_cpu(resp->id) == sensor_id)
+ *value = get_unaligned_le64(&resp->readings);
+ else
+ ret = -EPROTO;
+ }
} else {
sensor->flags = cpu_to_le32(0);
ret = scmi_do_xfer(handle, t);
@@ -262,6 +732,84 @@ static int scmi_sensor_reading_get(const struct scmi_handle *handle,
return ret;
}
+static inline void
+scmi_parse_sensor_readings(struct scmi_sensor_reading *out,
+ const struct scmi_sensor_reading_resp *in)
+{
+ out->value = get_unaligned_le64((void *)&in->sensor_value_low);
+ out->timestamp = get_unaligned_le64((void *)&in->timestamp_low);
+}
+
+/**
+ * scmi_sensor_reading_get_timestamped - Read multiple-axis timestamped values
+ * @handle: Platform handle
+ * @sensor_id: Sensor ID
+ * @count: The length of the provided @readings array
+ * @readings: An array of elements each representing a timestamped per-axis
+ * reading of type @struct scmi_sensor_reading.
+ * Returned readings are ordered as the @axis descriptors array
+ * included in @struct scmi_sensor_info and the max number of
+ * returned elements is min(@count, @num_axis); ideally the provided
+ * array should be of length @count equal to @num_axis.
+ *
+ * Return: 0 on Success
+ */
+static int
+scmi_sensor_reading_get_timestamped(const struct scmi_handle *handle,
+ u32 sensor_id, u8 count,
+ struct scmi_sensor_reading *readings)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_sensor_reading_get *sensor;
+ struct sensors_info *si = handle->sensor_priv;
+ struct scmi_sensor_info *s = si->sensors + sensor_id;
+
+ if (!count || !readings ||
+ (!s->num_axis && count > 1) || (s->num_axis && count > s->num_axis))
+ return -EINVAL;
+
+ ret = scmi_xfer_get_init(handle, SENSOR_READING_GET,
+ SCMI_PROTOCOL_SENSOR, sizeof(*sensor), 0, &t);
+ if (ret)
+ return ret;
+
+ sensor = t->tx.buf;
+ sensor->id = cpu_to_le32(sensor_id);
+ if (s->async) {
+ sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
+ ret = scmi_do_xfer_with_response(handle, t);
+ if (!ret) {
+ int i;
+ struct scmi_resp_sensor_reading_complete_v3 *resp;
+
+ resp = t->rx.buf;
+ /* Retrieve only the number of requested axis anyway */
+ if (le32_to_cpu(resp->id) == sensor_id)
+ for (i = 0; i < count; i++)
+ scmi_parse_sensor_readings(&readings[i],
+ &resp->readings[i]);
+ else
+ ret = -EPROTO;
+ }
+ } else {
+ sensor->flags = cpu_to_le32(0);
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ int i;
+ struct scmi_sensor_reading_resp *resp_readings;
+
+ resp_readings = t->rx.buf;
+ for (i = 0; i < count; i++)
+ scmi_parse_sensor_readings(&readings[i],
+ &resp_readings[i]);
+ }
+ }
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
static const struct scmi_sensor_info *
scmi_sensor_info_get(const struct scmi_handle *handle, u32 sensor_id)
{
@@ -282,6 +830,9 @@ static const struct scmi_sensor_ops sensor_ops = {
.info_get = scmi_sensor_info_get,
.trip_point_config = scmi_sensor_trip_point_config,
.reading_get = scmi_sensor_reading_get,
+ .reading_get_timestamped = scmi_sensor_reading_get_timestamped,
+ .config_get = scmi_sensor_config_get,
+ .config_set = scmi_sensor_config_set,
};
static int scmi_sensor_set_notify_enabled(const struct scmi_handle *handle,
@@ -289,7 +840,19 @@ static int scmi_sensor_set_notify_enabled(const struct scmi_handle *handle,
{
int ret;
- ret = scmi_sensor_trip_point_notify(handle, src_id, enable);
+ switch (evt_id) {
+ case SCMI_EVENT_SENSOR_TRIP_POINT_EVENT:
+ ret = scmi_sensor_trip_point_notify(handle, src_id, enable);
+ break;
+ case SCMI_EVENT_SENSOR_UPDATE:
+ ret = scmi_sensor_continuous_update_notify(handle, src_id,
+ enable);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
if (ret)
pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
evt_id, src_id, ret);
@@ -302,20 +865,59 @@ static void *scmi_sensor_fill_custom_report(const struct scmi_handle *handle,
const void *payld, size_t payld_sz,
void *report, u32 *src_id)
{
- const struct scmi_sensor_trip_notify_payld *p = payld;
- struct scmi_sensor_trip_point_report *r = report;
+ void *rep = NULL;
+
+ switch (evt_id) {
+ case SCMI_EVENT_SENSOR_TRIP_POINT_EVENT:
+ {
+ const struct scmi_sensor_trip_notify_payld *p = payld;
+ struct scmi_sensor_trip_point_report *r = report;
- if (evt_id != SCMI_EVENT_SENSOR_TRIP_POINT_EVENT ||
- sizeof(*p) != payld_sz)
- return NULL;
+ if (sizeof(*p) != payld_sz)
+ break;
- r->timestamp = timestamp;
- r->agent_id = le32_to_cpu(p->agent_id);
- r->sensor_id = le32_to_cpu(p->sensor_id);
- r->trip_point_desc = le32_to_cpu(p->trip_point_desc);
- *src_id = r->sensor_id;
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->sensor_id = le32_to_cpu(p->sensor_id);
+ r->trip_point_desc = le32_to_cpu(p->trip_point_desc);
+ *src_id = r->sensor_id;
+ rep = r;
+ break;
+ }
+ case SCMI_EVENT_SENSOR_UPDATE:
+ {
+ int i;
+ struct scmi_sensor_info *s;
+ const struct scmi_sensor_update_notify_payld *p = payld;
+ struct scmi_sensor_update_report *r = report;
+ struct sensors_info *sinfo = handle->sensor_priv;
+
+ /* payld_sz is variable for this event */
+ r->sensor_id = le32_to_cpu(p->sensor_id);
+ if (r->sensor_id >= sinfo->num_sensors)
+ break;
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ s = &sinfo->sensors[r->sensor_id];
+ /*
+ * The generated report r (@struct scmi_sensor_update_report)
+ * was pre-allocated to contain up to SCMI_MAX_NUM_SENSOR_AXIS
+ * readings: here it is filled with the effective @num_axis
+ * readings defined for this sensor or 1 for scalar sensors.
+ */
+ r->readings_count = s->num_axis ?: 1;
+ for (i = 0; i < r->readings_count; i++)
+ scmi_parse_sensor_readings(&r->readings[i],
+ &p->readings[i]);
+ *src_id = r->sensor_id;
+ rep = r;
+ break;
+ }
+ default:
+ break;
+ }
- return r;
+ return rep;
}
static const struct scmi_event sensor_events[] = {
@@ -324,6 +926,16 @@ static const struct scmi_event sensor_events[] = {
.max_payld_sz = sizeof(struct scmi_sensor_trip_notify_payld),
.max_report_sz = sizeof(struct scmi_sensor_trip_point_report),
},
+ {
+ .id = SCMI_EVENT_SENSOR_UPDATE,
+ .max_payld_sz =
+ sizeof(struct scmi_sensor_update_notify_payld) +
+ SCMI_MAX_NUM_SENSOR_AXIS *
+ sizeof(struct scmi_sensor_reading_resp),
+ .max_report_sz = sizeof(struct scmi_sensor_update_report) +
+ SCMI_MAX_NUM_SENSOR_AXIS *
+ sizeof(struct scmi_sensor_reading),
+ },
};
static const struct scmi_event_ops sensor_event_ops = {
@@ -334,6 +946,7 @@ static const struct scmi_event_ops sensor_event_ops = {
static int scmi_sensors_protocol_init(struct scmi_handle *handle)
{
u32 version;
+ int ret;
struct sensors_info *sinfo;
scmi_version_get(handle, SCMI_PROTOCOL_SENSOR, &version);
@@ -344,15 +957,19 @@ static int scmi_sensors_protocol_init(struct scmi_handle *handle)
sinfo = devm_kzalloc(handle->dev, sizeof(*sinfo), GFP_KERNEL);
if (!sinfo)
return -ENOMEM;
+ sinfo->version = version;
- scmi_sensor_attributes_get(handle, sinfo);
-
+ ret = scmi_sensor_attributes_get(handle, sinfo);
+ if (ret)
+ return ret;
sinfo->sensors = devm_kcalloc(handle->dev, sinfo->num_sensors,
sizeof(*sinfo->sensors), GFP_KERNEL);
if (!sinfo->sensors)
return -ENOMEM;
- scmi_sensor_description_get(handle, sinfo);
+ ret = scmi_sensor_description_get(handle, sinfo);
+ if (ret)
+ return ret;
scmi_register_protocol_events(handle,
SCMI_PROTOCOL_SENSOR, SCMI_PROTO_QUEUE_SZ,
@@ -360,9 +977,8 @@ static int scmi_sensors_protocol_init(struct scmi_handle *handle)
ARRAY_SIZE(sensor_events),
sinfo->num_sensors);
- sinfo->version = version;
- handle->sensor_ops = &sensor_ops;
handle->sensor_priv = sinfo;
+ handle->sensor_ops = &sensor_ops;
return 0;
}
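
[Editor's note] With sensor_ops extended above, consumers can reach the new config_get/config_set and timestamped multi-axis reads through handle->sensor_ops. The consumer-side sketch below is a hypothetical example (sensor ID, buffer size, and validation are assumptions), not code taken from this series.

/*
 * Hypothetical consumer of the ops registered above: read one sample per
 * axis (or a single scalar reading) into a fixed local buffer.
 */
static int example_read_axes(const struct scmi_handle *handle, u32 sensor_id)
{
	const struct scmi_sensor_info *s;
	struct scmi_sensor_reading readings[4];
	u8 count;

	s = handle->sensor_ops->info_get(handle, sensor_id);
	if (!s)
		return -ENODEV;

	/* One reading per advertised axis, or one scalar reading. */
	count = min_t(u8, s->num_axis ?: 1, ARRAY_SIZE(readings));

	return handle->sensor_ops->reading_get_timestamped(handle, sensor_id,
							    count, readings);
}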
diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c
index 4265e9dbed84..a6c06d7476c3 100644
--- a/drivers/firmware/imx/imx-dsp.c
+++ b/drivers/firmware/imx/imx-dsp.c
@@ -60,22 +60,40 @@ static void imx_dsp_handle_rx(struct mbox_client *c, void *msg)
}
}
-static int imx_dsp_probe(struct platform_device *pdev)
+struct mbox_chan *imx_dsp_request_channel(struct imx_dsp_ipc *dsp_ipc, int idx)
{
- struct device *dev = &pdev->dev;
- struct imx_dsp_ipc *dsp_ipc;
+ struct imx_dsp_chan *dsp_chan;
+
+ if (idx >= DSP_MU_CHAN_NUM)
+ return ERR_PTR(-EINVAL);
+
+ dsp_chan = &dsp_ipc->chans[idx];
+ dsp_chan->ch = mbox_request_channel_byname(&dsp_chan->cl, dsp_chan->name);
+ return dsp_chan->ch;
+}
+EXPORT_SYMBOL(imx_dsp_request_channel);
+
+void imx_dsp_free_channel(struct imx_dsp_ipc *dsp_ipc, int idx)
+{
+ struct imx_dsp_chan *dsp_chan;
+
+ if (idx >= DSP_MU_CHAN_NUM)
+ return;
+
+ dsp_chan = &dsp_ipc->chans[idx];
+ mbox_free_channel(dsp_chan->ch);
+}
+EXPORT_SYMBOL(imx_dsp_free_channel);
+
+static int imx_dsp_setup_channels(struct imx_dsp_ipc *dsp_ipc)
+{
+ struct device *dev = dsp_ipc->dev;
struct imx_dsp_chan *dsp_chan;
struct mbox_client *cl;
char *chan_name;
int ret;
int i, j;
- device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
-
- dsp_ipc = devm_kzalloc(dev, sizeof(*dsp_ipc), GFP_KERNEL);
- if (!dsp_ipc)
- return -ENOMEM;
-
for (i = 0; i < DSP_MU_CHAN_NUM; i++) {
if (i < 2)
chan_name = kasprintf(GFP_KERNEL, "txdb%d", i);
@@ -86,6 +104,7 @@ static int imx_dsp_probe(struct platform_device *pdev)
return -ENOMEM;
dsp_chan = &dsp_ipc->chans[i];
+ dsp_chan->name = chan_name;
cl = &dsp_chan->cl;
cl->dev = dev;
cl->tx_block = false;
@@ -104,27 +123,43 @@ static int imx_dsp_probe(struct platform_device *pdev)
}
dev_dbg(dev, "request mbox chan %s\n", chan_name);
- /* chan_name is not used anymore by framework */
- kfree(chan_name);
}
- dsp_ipc->dev = dev;
-
- dev_set_drvdata(dev, dsp_ipc);
-
- dev_info(dev, "NXP i.MX DSP IPC initialized\n");
-
return 0;
out:
- kfree(chan_name);
for (j = 0; j < i; j++) {
dsp_chan = &dsp_ipc->chans[j];
mbox_free_channel(dsp_chan->ch);
+ kfree(dsp_chan->name);
}
return ret;
}
+static int imx_dsp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct imx_dsp_ipc *dsp_ipc;
+ int ret;
+
+ device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
+
+ dsp_ipc = devm_kzalloc(dev, sizeof(*dsp_ipc), GFP_KERNEL);
+ if (!dsp_ipc)
+ return -ENOMEM;
+
+ dsp_ipc->dev = dev;
+ dev_set_drvdata(dev, dsp_ipc);
+
+ ret = imx_dsp_setup_channels(dsp_ipc);
+ if (ret < 0)
+ return ret;
+
+ dev_info(dev, "NXP i.MX DSP IPC initialized\n");
+
+ return 0;
+}
+
static int imx_dsp_remove(struct platform_device *pdev)
{
struct imx_dsp_chan *dsp_chan;
@@ -136,6 +171,7 @@ static int imx_dsp_remove(struct platform_device *pdev)
for (i = 0; i < DSP_MU_CHAN_NUM; i++) {
dsp_chan = &dsp_ipc->chans[i];
mbox_free_channel(dsp_chan->ch);
+ kfree(dsp_chan->name);
}
return 0;
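
[Editor's note] imx_dsp_request_channel() and imx_dsp_free_channel() are newly exported above so a client driver can borrow a doorbell channel on demand instead of keeping every channel requested. A hypothetical caller is sketched below; how the dsp_ipc handle is obtained and what is done with the channel are assumptions for the example.

/*
 * Hypothetical caller of the newly exported helpers; index 0 corresponds
 * to the "txdb0" channel named in imx_dsp_setup_channels() above.
 */
static int example_borrow_channel(struct imx_dsp_ipc *dsp_ipc)
{
	struct mbox_chan *chan;

	chan = imx_dsp_request_channel(dsp_ipc, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... use chan with the regular mailbox API (mbox_send_message) ... */

	imx_dsp_free_channel(dsp_ipc, 0);

	return 0;
}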
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
index 946eea292b52..08533ee67626 100644
--- a/drivers/firmware/imx/scu-pd.c
+++ b/drivers/firmware/imx/scu-pd.c
@@ -160,12 +160,18 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
{ "mipi0-pwm0", IMX_SC_R_MIPI_0_PWM_0, 1, false, 0 },
{ "mipi0-i2c", IMX_SC_R_MIPI_0_I2C_0, 2, true, 0 },
+ { "mipi1", IMX_SC_R_MIPI_1, 1, false, 0 },
+ { "mipi1-pwm0", IMX_SC_R_MIPI_1_PWM_0, 1, false, 0 },
+ { "mipi1-i2c", IMX_SC_R_MIPI_1_I2C_0, 2, true, 0 },
+
/* LVDS SS */
{ "lvds0", IMX_SC_R_LVDS_0, 1, false, 0 },
+ { "lvds1", IMX_SC_R_LVDS_1, 1, false, 0 },
/* DC SS */
{ "dc0", IMX_SC_R_DC_0, 1, false, 0 },
{ "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, true, 0 },
+ { "dc0-video", IMX_SC_R_DC_0_VIDEO0, 2, true, 0 },
/* CM40 SS */
{ "cm40-i2c", IMX_SC_R_M4_0_I2C, 1, false, 0 },
@@ -180,6 +186,12 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
{ "cm41-pid", IMX_SC_R_M4_1_PID0, 5, true, 0},
{ "cm41-mu-a1", IMX_SC_R_M4_1_MU_1A, 1, false, 0},
{ "cm41-lpuart", IMX_SC_R_M4_1_UART, 1, false, 0},
+
+ /* IMAGE SS */
+ { "img-jpegdec-mp", IMX_SC_R_MJPEG_DEC_MP, 1, false, 0 },
+ { "img-jpegdec-s0", IMX_SC_R_MJPEG_DEC_S0, 4, true, 0 },
+ { "img-jpegenc-mp", IMX_SC_R_MJPEG_ENC_MP, 1, false, 0 },
+ { "img-jpegenc-s0", IMX_SC_R_MJPEG_ENC_S0, 4, true, 0 },
};
static const struct imx_sc_pd_soc imx8qxp_scu_pd = {
diff --git a/drivers/firmware/meson/Kconfig b/drivers/firmware/meson/Kconfig
index 2671dcd0ad92..f2fdd3756648 100644
--- a/drivers/firmware/meson/Kconfig
+++ b/drivers/firmware/meson/Kconfig
@@ -3,8 +3,9 @@
# Amlogic Secure Monitor driver
#
config MESON_SM
- bool
- default ARCH_MESON
+ tristate "Amlogic Secure Monitor driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ default y
depends on ARM64_4K_PAGES
help
Say y here to enable the Amlogic secure monitor driver
diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
index 2854b56f6e0b..77aa5c6398aa 100644
--- a/drivers/firmware/meson/meson_sm.c
+++ b/drivers/firmware/meson/meson_sm.c
@@ -331,3 +331,4 @@ static struct platform_driver meson_sm_driver = {
},
};
module_platform_driver_probe(meson_sm_driver, meson_sm_probe);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
index c1bbba9ee93a..440d99c63638 100644
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -412,16 +412,12 @@ static int bpmp_populate_debugfs_inband(struct tegra_bpmp *bpmp,
goto out;
}
- len = strlen(ppath) + strlen(name) + 1;
+ len = snprintf(pathbuf, pathlen, "%s%s/", ppath, name);
if (len >= pathlen) {
err = -EINVAL;
goto out;
}
- strncpy(pathbuf, ppath, pathlen);
- strncat(pathbuf, name, strlen(name));
- strcat(pathbuf, "/");
-
err = bpmp_populate_debugfs_inband(bpmp, dentry,
pathbuf);
if (err < 0)
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 896f53ec7857..235c7e7869aa 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -1703,14 +1703,14 @@ fail:
* @subtype: Resource assignment subtype that is being requested
* from the given device.
* @s_host: Host processor ID to which the resources are allocated
- * @range_start: Start index of the resource range
- * @range_num: Number of resources in the range
+ * @desc: Pointer to ti_sci_resource_desc to be updated with the
+ * resource range start index and number of resources
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
u32 dev_id, u8 subtype, u8 s_host,
- u16 *range_start, u16 *range_num)
+ struct ti_sci_resource_desc *desc)
{
struct ti_sci_msg_resp_get_resource_range *resp;
struct ti_sci_msg_req_get_resource_range *req;
@@ -1721,7 +1721,7 @@ static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
if (IS_ERR(handle))
return PTR_ERR(handle);
- if (!handle)
+ if (!handle || !desc)
return -EINVAL;
info = handle_to_ti_sci_info(handle);
@@ -1751,11 +1751,14 @@ static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
if (!ti_sci_is_response_ack(resp)) {
ret = -ENODEV;
- } else if (!resp->range_start && !resp->range_num) {
+ } else if (!resp->range_num && !resp->range_num_sec) {
+ /* Neither of the two resource range is valid */
ret = -ENODEV;
} else {
- *range_start = resp->range_start;
- *range_num = resp->range_num;
+ desc->start = resp->range_start;
+ desc->num = resp->range_num;
+ desc->start_sec = resp->range_start_sec;
+ desc->num_sec = resp->range_num_sec;
};
fail:
@@ -1771,18 +1774,18 @@ fail:
* @dev_id: TISCI device ID.
* @subtype: Resource assignment subtype that is being requested
* from the given device.
- * @range_start: Start index of the resource range
- * @range_num: Number of resources in the range
+ * @desc: Pointer to ti_sci_resource_desc to be updated with the
+ * resource range start index and number of resources
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
u32 dev_id, u8 subtype,
- u16 *range_start, u16 *range_num)
+ struct ti_sci_resource_desc *desc)
{
return ti_sci_get_resource_range(handle, dev_id, subtype,
TI_SCI_IRQ_SECONDARY_HOST_INVALID,
- range_start, range_num);
+ desc);
}
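For context, callers reach this helper through the handle's resource-management
ops; a hedged sketch of a call with the new descriptor-based signature (the
dev_id and subtype values are placeholders, and the debug print is illustrative):

    struct ti_sci_resource_desc desc = { };
    int ret;

    ret = handle->ops.rm_core_ops.get_range(handle, dev_id, subtype, &desc);
    if (!ret)
            pr_debug("primary %u+%u, secondary %u+%u\n",
                     desc.start, desc.num, desc.start_sec, desc.num_sec);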
/**
@@ -1793,18 +1796,17 @@ static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
* @subtype: Resource assignment subtype that is being requested
* from the given device.
* @s_host: Host processor ID to which the resources are allocated
- * @range_start: Start index of the resource range
- * @range_num: Number of resources in the range
+ * @desc: Pointer to ti_sci_resource_desc to be updated with the
+ * resource range start index and number of resources
*
* Return: 0 if all went fine, else return appropriate error.
*/
static
int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
u32 dev_id, u8 subtype, u8 s_host,
- u16 *range_start, u16 *range_num)
+ struct ti_sci_resource_desc *desc)
{
- return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
- range_start, range_num);
+ return ti_sci_get_resource_range(handle, dev_id, subtype, s_host, desc);
}
/**
@@ -2047,28 +2049,17 @@ static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
}
/**
- * ti_sci_cmd_ring_config() - configure RA ring
- * @handle: Pointer to TI SCI handle.
- * @valid_params: Bitfield defining validity of ring configuration
- * parameters
- * @nav_id: Device ID of Navigator Subsystem from which the ring is
- * allocated
- * @index: Ring index
- * @addr_lo: The ring base address lo 32 bits
- * @addr_hi: The ring base address hi 32 bits
- * @count: Number of ring elements
- * @mode: The mode of the ring
- * @size: The ring element size.
- * @order_id: Specifies the ring's bus order ID
+ * ti_sci_cmd_rm_ring_cfg() - Configure a NAVSS ring
+ * @handle: Pointer to TI SCI handle.
+ * @params: Pointer to ti_sci_msg_rm_ring_cfg ring config structure
*
* Return: 0 if all went well, else returns appropriate error value.
*
- * See @ti_sci_msg_rm_ring_cfg_req for more info.
+ * See @ti_sci_msg_rm_ring_cfg and @ti_sci_msg_rm_ring_cfg_req for
+ * more info.
*/
-static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
- u32 valid_params, u16 nav_id, u16 index,
- u32 addr_lo, u32 addr_hi, u32 count,
- u8 mode, u8 size, u8 order_id)
+static int ti_sci_cmd_rm_ring_cfg(const struct ti_sci_handle *handle,
+ const struct ti_sci_msg_rm_ring_cfg *params)
{
struct ti_sci_msg_rm_ring_cfg_req *req;
struct ti_sci_msg_hdr *resp;
@@ -2092,15 +2083,17 @@ static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
return ret;
}
req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
- req->valid_params = valid_params;
- req->nav_id = nav_id;
- req->index = index;
- req->addr_lo = addr_lo;
- req->addr_hi = addr_hi;
- req->count = count;
- req->mode = mode;
- req->size = size;
- req->order_id = order_id;
+ req->valid_params = params->valid_params;
+ req->nav_id = params->nav_id;
+ req->index = params->index;
+ req->addr_lo = params->addr_lo;
+ req->addr_hi = params->addr_hi;
+ req->count = params->count;
+ req->mode = params->mode;
+ req->size = params->size;
+ req->order_id = params->order_id;
+ req->virtid = params->virtid;
+ req->asel = params->asel;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
@@ -2109,90 +2102,11 @@ static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
}
resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
- ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
-
-fail:
- ti_sci_put_one_xfer(&info->minfo, xfer);
- dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", index, ret);
- return ret;
-}
-
-/**
- * ti_sci_cmd_ring_get_config() - get RA ring configuration
- * @handle: Pointer to TI SCI handle.
- * @nav_id: Device ID of Navigator Subsystem from which the ring is
- * allocated
- * @index: Ring index
- * @addr_lo: Returns ring's base address lo 32 bits
- * @addr_hi: Returns ring's base address hi 32 bits
- * @count: Returns number of ring elements
- * @mode: Returns mode of the ring
- * @size: Returns ring element size
- * @order_id: Returns ring's bus order ID
- *
- * Return: 0 if all went well, else returns appropriate error value.
- *
- * See @ti_sci_msg_rm_ring_get_cfg_req for more info.
- */
-static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle,
- u32 nav_id, u32 index, u8 *mode,
- u32 *addr_lo, u32 *addr_hi,
- u32 *count, u8 *size, u8 *order_id)
-{
- struct ti_sci_msg_rm_ring_get_cfg_resp *resp;
- struct ti_sci_msg_rm_ring_get_cfg_req *req;
- struct ti_sci_xfer *xfer;
- struct ti_sci_info *info;
- struct device *dev;
- int ret = 0;
-
- if (IS_ERR_OR_NULL(handle))
- return -EINVAL;
-
- info = handle_to_ti_sci_info(handle);
- dev = info->dev;
-
- xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG,
- TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
- sizeof(*req), sizeof(*resp));
- if (IS_ERR(xfer)) {
- ret = PTR_ERR(xfer);
- dev_err(dev,
- "RM_RA:Message get config failed(%d)\n", ret);
- return ret;
- }
- req = (struct ti_sci_msg_rm_ring_get_cfg_req *)xfer->xfer_buf;
- req->nav_id = nav_id;
- req->index = index;
-
- ret = ti_sci_do_xfer(info, xfer);
- if (ret) {
- dev_err(dev, "RM_RA:Mbox get config send fail %d\n", ret);
- goto fail;
- }
-
- resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->xfer_buf;
-
- if (!ti_sci_is_response_ack(resp)) {
- ret = -ENODEV;
- } else {
- if (mode)
- *mode = resp->mode;
- if (addr_lo)
- *addr_lo = resp->addr_lo;
- if (addr_hi)
- *addr_hi = resp->addr_hi;
- if (count)
- *count = resp->count;
- if (size)
- *size = resp->size;
- if (order_id)
- *order_id = resp->order_id;
- };
+ ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
- dev_dbg(dev, "RM_RA:get config ring %u ret:%d\n", index, ret);
+ dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", params->index, ret);
return ret;
}
@@ -2362,6 +2276,8 @@ static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
req->fdepth = params->fdepth;
req->tx_sched_priority = params->tx_sched_priority;
req->tx_burst_size = params->tx_burst_size;
+ req->tx_tdtype = params->tx_tdtype;
+ req->extended_ch_type = params->extended_ch_type;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
@@ -2921,8 +2837,7 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
iops->free_irq = ti_sci_cmd_free_irq;
iops->free_event_map = ti_sci_cmd_free_event_map;
- rops->config = ti_sci_cmd_ring_config;
- rops->get_config = ti_sci_cmd_ring_get_config;
+ rops->set_cfg = ti_sci_cmd_rm_ring_cfg;
psilops->pair = ti_sci_cmd_rm_psil_pair;
psilops->unpair = ti_sci_cmd_rm_psil_unpair;
@@ -3157,12 +3072,18 @@ u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
raw_spin_lock_irqsave(&res->lock, flags);
for (set = 0; set < res->sets; set++) {
- free_bit = find_first_zero_bit(res->desc[set].res_map,
- res->desc[set].num);
- if (free_bit != res->desc[set].num) {
- set_bit(free_bit, res->desc[set].res_map);
+ struct ti_sci_resource_desc *desc = &res->desc[set];
+ int res_count = desc->num + desc->num_sec;
+
+ free_bit = find_first_zero_bit(desc->res_map, res_count);
+ if (free_bit != res_count) {
+ set_bit(free_bit, desc->res_map);
raw_spin_unlock_irqrestore(&res->lock, flags);
- return res->desc[set].start + free_bit;
+
+ if (desc->num && free_bit < desc->num)
+ return desc->start + free_bit;
+ else
+ return desc->start_sec + free_bit;
}
}
raw_spin_unlock_irqrestore(&res->lock, flags);
@@ -3183,10 +3104,14 @@ void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
raw_spin_lock_irqsave(&res->lock, flags);
for (set = 0; set < res->sets; set++) {
- if (res->desc[set].start <= id &&
- (res->desc[set].num + res->desc[set].start) > id)
- clear_bit(id - res->desc[set].start,
- res->desc[set].res_map);
+ struct ti_sci_resource_desc *desc = &res->desc[set];
+
+ if (desc->num && desc->start <= id &&
+ (desc->start + desc->num) > id)
+ clear_bit(id - desc->start, desc->res_map);
+ else if (desc->num_sec && desc->start_sec <= id &&
+ (desc->start_sec + desc->num_sec) > id)
+ clear_bit(id - desc->start_sec, desc->res_map);
}
raw_spin_unlock_irqrestore(&res->lock, flags);
}
@@ -3203,7 +3128,7 @@ u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
u32 set, count = 0;
for (set = 0; set < res->sets; set++)
- count += res->desc[set].num;
+ count += res->desc[set].num + res->desc[set].num_sec;
return count;
}
@@ -3227,7 +3152,7 @@ devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
{
struct ti_sci_resource *res;
bool valid_set = false;
- int i, ret;
+ int i, ret, res_count;
res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
if (!res)
@@ -3242,23 +3167,23 @@ devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
for (i = 0; i < res->sets; i++) {
ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
sub_types[i],
- &res->desc[i].start,
- &res->desc[i].num);
+ &res->desc[i]);
if (ret) {
dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
dev_id, sub_types[i]);
- res->desc[i].start = 0;
- res->desc[i].num = 0;
+ memset(&res->desc[i], 0, sizeof(res->desc[i]));
continue;
}
- dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n",
+ dev_dbg(dev, "dev/sub_type: %d/%d, start/num: %d/%d | %d/%d\n",
dev_id, sub_types[i], res->desc[i].start,
- res->desc[i].num);
+ res->desc[i].num, res->desc[i].start_sec,
+ res->desc[i].num_sec);
valid_set = true;
+ res_count = res->desc[i].num + res->desc[i].num_sec;
res->desc[i].res_map =
- devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
+ devm_kzalloc(dev, BITS_TO_LONGS(res_count) *
sizeof(*res->desc[i].res_map), GFP_KERNEL);
if (!res->desc[i].res_map)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
index 57cd04062994..ef3a8214d002 100644
--- a/drivers/firmware/ti_sci.h
+++ b/drivers/firmware/ti_sci.h
@@ -49,7 +49,6 @@
#define TI_SCI_MSG_RM_RING_RECONFIG 0x1102
#define TI_SCI_MSG_RM_RING_RESET 0x1103
#define TI_SCI_MSG_RM_RING_CFG 0x1110
-#define TI_SCI_MSG_RM_RING_GET_CFG 0x1111
/* PSI-L requests */
#define TI_SCI_MSG_RM_PSIL_PAIR 0x1280
@@ -574,8 +573,10 @@ struct ti_sci_msg_req_get_resource_range {
/**
* struct ti_sci_msg_resp_get_resource_range - Response to resource get range.
* @hdr: Generic Header
- * @range_start: Start index of the resource range.
- * @range_num: Number of resources in the range.
+ * @range_start: Start index of the first resource range.
+ * @range_num: Number of resources in the first range.
+ * @range_start_sec: Start index of the second resource range.
+ * @range_num_sec: Number of resources in the second range.
*
* Response to request TI_SCI_MSG_GET_RESOURCE_RANGE.
*/
@@ -583,6 +584,8 @@ struct ti_sci_msg_resp_get_resource_range {
struct ti_sci_msg_hdr hdr;
u16 range_start;
u16 range_num;
+ u16 range_start_sec;
+ u16 range_num_sec;
} __packed;
/**
@@ -656,6 +659,8 @@ struct ti_sci_msg_req_manage_irq {
* 3 - Valid bit for @tisci_msg_rm_ring_cfg_req mode
* 4 - Valid bit for @tisci_msg_rm_ring_cfg_req size
* 5 - Valid bit for @tisci_msg_rm_ring_cfg_req order_id
+ * 6 - Valid bit for @tisci_msg_rm_ring_cfg_req virtid
+ * 7 - Valid bit for @tisci_msg_rm_ring_cfg_req ASEL
* @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
* @index: ring index to be configured.
* @addr_lo: 32 LSBs of ring base address to be programmed into the ring's
@@ -669,6 +674,9 @@ struct ti_sci_msg_req_manage_irq {
* the formula (log2(size_bytes) - 2), where size_bytes cannot be
* greater than 256.
* @order_id: Specifies the ring's bus order ID.
+ * @virtid: Ring virt ID value
+ * @asel: Ring ASEL (address select) value to be set into the ASEL field of the
+ * ring's RING_BA_HI register.
*/
struct ti_sci_msg_rm_ring_cfg_req {
struct ti_sci_msg_hdr hdr;
@@ -681,49 +689,8 @@ struct ti_sci_msg_rm_ring_cfg_req {
u8 mode;
u8 size;
u8 order_id;
-} __packed;
-
-/**
- * struct ti_sci_msg_rm_ring_get_cfg_req - Get RA ring's configuration
- *
- * Gets the configuration of the non-real-time register fields of a ring. The
- * host, or a supervisor of the host, who owns the ring must be the requesting
- * host. The values of the non-real-time registers are returned in
- * @ti_sci_msg_rm_ring_get_cfg_resp.
- *
- * @hdr: Generic Header
- * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
- * @index: ring index.
- */
-struct ti_sci_msg_rm_ring_get_cfg_req {
- struct ti_sci_msg_hdr hdr;
- u16 nav_id;
- u16 index;
-} __packed;
-
-/**
- * struct ti_sci_msg_rm_ring_get_cfg_resp - Ring get configuration response
- *
- * Response received by host processor after RM has handled
- * @ti_sci_msg_rm_ring_get_cfg_req. The response contains the ring's
- * non-real-time register values.
- *
- * @hdr: Generic Header
- * @addr_lo: Ring 32 LSBs of base address
- * @addr_hi: Ring 16 MSBs of base address.
- * @count: Ring number of elements.
- * @mode: Ring mode.
- * @size: encoded Ring element size
- * @order_id: ing order ID.
- */
-struct ti_sci_msg_rm_ring_get_cfg_resp {
- struct ti_sci_msg_hdr hdr;
- u32 addr_lo;
- u32 addr_hi;
- u32 count;
- u8 mode;
- u8 size;
- u8 order_id;
+ u16 virtid;
+ u8 asel;
} __packed;
/**
@@ -910,6 +877,8 @@ struct rm_ti_sci_msg_udmap_rx_flow_opt_cfg {
* 12 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_credit_count
* 13 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::fdepth
* 14 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_burst_size
+ * 15 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_tdtype
+ * 16 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::extended_ch_type
*
* @nav_id: SoC device ID of Navigator Subsystem where tx channel is located
*
@@ -973,6 +942,15 @@ struct rm_ti_sci_msg_udmap_rx_flow_opt_cfg {
*
* @tx_burst_size: UDMAP transmit channel burst size configuration to be
* programmed into the tx_burst_size field of the TCHAN_TCFG register.
+ *
+ * @tx_tdtype: UDMAP transmit channel teardown type configuration to be
+ * programmed into the tdtype field of the TCHAN_TCFG register:
+ * 0 - Return immediately
+ * 1 - Wait for completion message from remote peer
+ *
+ * @extended_ch_type: Valid for BCDMA.
+ * 0 - the channel is a split tx channel (tchan)
+ * 1 - the channel is a block copy channel (bchan)
*/
struct ti_sci_msg_rm_udmap_tx_ch_cfg_req {
struct ti_sci_msg_hdr hdr;
@@ -994,6 +972,8 @@ struct ti_sci_msg_rm_udmap_tx_ch_cfg_req {
u16 fdepth;
u8 tx_sched_priority;
u8 tx_burst_size;
+ u8 tx_tdtype;
+ u8 extended_ch_type;
} __packed;
/**
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index fd95edeb702b..7eb9958662dd 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -615,13 +615,13 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_data);
/**
* zynqmp_pm_set_sd_tapdelay() - Set tap delay for the SD device
*
- * @node_id Node ID of the device
- * @type Type of tap delay to set (input/output)
- * @value Value to set fot the tap delay
+ * @node_id: Node ID of the device
+ * @type: Type of tap delay to set (input/output)
+ * @value: Value to set for the tap delay
*
* This function sets input/output tap delay for the SD device.
*
- * @return Returns status, either success or error+reason
+ * Return: Returns status, either success or error+reason
*/
int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value)
{
@@ -633,12 +633,12 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_tapdelay);
/**
* zynqmp_pm_sd_dll_reset() - Reset DLL logic
*
- * @node_id Node ID of the device
- * @type Reset type
+ * @node_id: Node ID of the device
+ * @type: Reset type
*
* This function resets DLL logic for the SD device.
*
- * @return Returns status, either success or error+reason
+ * Return: Returns status, either success or error+reason
*/
int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
{
@@ -649,12 +649,12 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset);
/**
* zynqmp_pm_write_ggs() - PM API for writing global general storage (ggs)
- * @index GGS register index
- * @value Register value to be written
+ * @index: GGS register index
+ * @value: Register value to be written
*
* This function writes value to GGS register.
*
- * @return Returns status, either success or error+reason
+ * Return: Returns status, either success or error+reason
*/
int zynqmp_pm_write_ggs(u32 index, u32 value)
{
@@ -665,12 +665,12 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_write_ggs);
/**
* zynqmp_pm_write_ggs() - PM API for reading global general storage (ggs)
- * @index GGS register index
- * @value Register value to be written
+ * @index: GGS register index
+ * @value: Register value to be written
*
* This function returns GGS register value.
*
- * @return Returns status, either success or error+reason
+ * Return: Returns status, either success or error+reason
*/
int zynqmp_pm_read_ggs(u32 index, u32 *value)
{
@@ -682,12 +682,12 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_read_ggs);
/**
* zynqmp_pm_write_pggs() - PM API for writing persistent global general
* storage (pggs)
- * @index PGGS register index
- * @value Register value to be written
+ * @index: PGGS register index
+ * @value: Register value to be written
*
* This function writes value to PGGS register.
*
- * @return Returns status, either success or error+reason
+ * Return: Returns status, either success or error+reason
*/
int zynqmp_pm_write_pggs(u32 index, u32 value)
{
@@ -699,12 +699,12 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_write_pggs);
/**
* zynqmp_pm_write_pggs() - PM API for reading persistent global general
* storage (pggs)
- * @index PGGS register index
- * @value Register value to be written
+ * @index: PGGS register index
+ * @value: Register value to be written
*
* This function returns PGGS register value.
*
- * @return Returns status, either success or error+reason
+ * Return: Returns status, either success or error+reason
*/
int zynqmp_pm_read_pggs(u32 index, u32 *value)
{
@@ -715,12 +715,12 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_read_pggs);
/**
* zynqmp_pm_set_boot_health_status() - PM API for setting healthy boot status
- * @value Status value to be written
+ * @value: Status value to be written
*
* This function sets healthy bit value to indicate boot health status
* to firmware.
*
- * @return Returns status, either success or error+reason
+ * Return: Returns status, either success or error+reason
*/
int zynqmp_pm_set_boot_health_status(u32 value)
{
@@ -815,10 +815,10 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_status);
* zynqmp_pm_init_finalize() - PM call to inform firmware that the caller
* master has initialized its own power management
*
+ * Return: Returns status, either success or error+reason
+ *
* This API function is to be used for notify the power management controller
* about the completed power management initialization.
- *
- * Return: Returns status, either success or error+reason
*/
int zynqmp_pm_init_finalize(void)
{
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index bfe994230543..bdd37eadecd5 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -829,8 +829,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
mtk_crtc->cmdq_client =
cmdq_mbox_create(mtk_crtc->mmsys_dev,
- drm_crtc_index(&mtk_crtc->base),
- 2000);
+ drm_crtc_index(&mtk_crtc->base));
if (IS_ERR(mtk_crtc->cmdq_client)) {
dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
drm_crtc_index(&mtk_crtc->base));
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 1d9e00b69462..5aa52b7afeec 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -7,6 +7,7 @@
#define MTK_DRM_DDP_COMP_H
#include <linux/io.h>
+#include <linux/soc/mediatek/mtk-mmsys.h>
struct device;
struct device_node;
@@ -35,39 +36,6 @@ enum mtk_ddp_comp_type {
MTK_DDP_COMP_TYPE_MAX,
};
-enum mtk_ddp_comp_id {
- DDP_COMPONENT_AAL0,
- DDP_COMPONENT_AAL1,
- DDP_COMPONENT_BLS,
- DDP_COMPONENT_CCORR,
- DDP_COMPONENT_COLOR0,
- DDP_COMPONENT_COLOR1,
- DDP_COMPONENT_DITHER,
- DDP_COMPONENT_DPI0,
- DDP_COMPONENT_DPI1,
- DDP_COMPONENT_DSI0,
- DDP_COMPONENT_DSI1,
- DDP_COMPONENT_DSI2,
- DDP_COMPONENT_DSI3,
- DDP_COMPONENT_GAMMA,
- DDP_COMPONENT_OD0,
- DDP_COMPONENT_OD1,
- DDP_COMPONENT_OVL0,
- DDP_COMPONENT_OVL_2L0,
- DDP_COMPONENT_OVL_2L1,
- DDP_COMPONENT_OVL1,
- DDP_COMPONENT_PWM0,
- DDP_COMPONENT_PWM1,
- DDP_COMPONENT_PWM2,
- DDP_COMPONENT_RDMA0,
- DDP_COMPONENT_RDMA1,
- DDP_COMPONENT_RDMA2,
- DDP_COMPONENT_UFOE,
- DDP_COMPONENT_WDMA0,
- DDP_COMPONENT_WDMA1,
- DDP_COMPONENT_ID_MAX,
-};
-
struct mtk_ddp_comp;
struct cmdq_pkt;
struct mtk_ddp_comp_funcs {
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 55960cbb1019..522e51a404cc 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -805,25 +805,6 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
ret = of_dma_configure(drm->dev, dev->of_node, true);
if (ret)
return ret;
- } else {
- /*
- * If we don't have the interconnect property, most likely
- * because of an old DT, we need to set the DMA offset by hand
- * on our device since the RAM mapping is at 0 for the DMA bus,
- * unlike the CPU.
- *
- * XXX(hch): this has no business in a driver and needs to move
- * to the device tree.
- *
- * If we have two subsequent calls to dma_direct_set_offset
- * returns -EINVAL. Unfortunately, this happens when we have two
- * backends in the system, and will result in the driver
- * reporting an error while it has been setup properly before.
- * Ignore EINVAL, but it should really be removed eventually.
- */
- ret = dma_direct_set_offset(drm->dev, PHYS_OFFSET, 0, SZ_4G);
- if (ret && ret != -EINVAL)
- return ret;
}
backend->engine.node = dev->of_node;
diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c
index 09ce30cba54b..17d064e58938 100644
--- a/drivers/hwmon/scmi-hwmon.c
+++ b/drivers/hwmon/scmi-hwmon.c
@@ -30,7 +30,7 @@ static inline u64 __pow10(u8 x)
static int scmi_hwmon_scale(const struct scmi_sensor_info *sensor, u64 *value)
{
- s8 scale = sensor->scale;
+ int scale = sensor->scale;
u64 f;
switch (sensor->type) {
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
index eb15c8c725ca..ec46cff80fdb 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
@@ -167,33 +167,6 @@ static int sun4i_csi_probe(struct platform_device *pdev)
if (!csi->traits)
return -EINVAL;
- /*
- * On Allwinner SoCs, some high memory bandwidth devices do DMA
- * directly over the memory bus (called MBUS), instead of the
- * system bus. The memory bus has a different addressing scheme
- * without the DRAM starting offset.
- *
- * In some cases this can be described by an interconnect in
- * the device tree. In other cases where the hardware is not
- * fully understood and the interconnect is left out of the
- * device tree, fall back to a default offset.
- */
- if (of_find_property(csi->dev->of_node, "interconnects", NULL)) {
- ret = of_dma_configure(csi->dev, csi->dev->of_node, true);
- if (ret)
- return ret;
- } else {
- /*
- * XXX(hch): this has no business in a driver and needs to move
- * to the device tree.
- */
-#ifdef PHYS_PFN_OFFSET
- ret = dma_direct_set_offset(csi->dev, PHYS_OFFSET, 0, SZ_4G);
- if (ret)
- return ret;
-#endif
- }
-
csi->mdev.dev = csi->dev;
strscpy(csi->mdev.model, "Allwinner Video Capture Device",
sizeof(csi->mdev.model));
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
index e69e14379fc6..27935f1e9555 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
@@ -881,14 +881,6 @@ static int sun6i_csi_resource_request(struct sun6i_csi_dev *sdev,
return 0;
}
-/*
- * PHYS_OFFSET isn't available on all architectures. In order to
- * accommodate for COMPILE_TEST, let's define it to something dumb.
- */
-#if defined(CONFIG_COMPILE_TEST) && !defined(PHYS_OFFSET)
-#define PHYS_OFFSET 0
-#endif
-
static int sun6i_csi_probe(struct platform_device *pdev)
{
struct sun6i_csi_dev *sdev;
@@ -899,15 +891,6 @@ static int sun6i_csi_probe(struct platform_device *pdev)
return -ENOMEM;
sdev->dev = &pdev->dev;
- /*
- * The DMA bus has the memory mapped at 0.
- *
- * XXX(hch): this has no business in a driver and needs to move
- * to the device tree.
- */
- ret = dma_direct_set_offset(sdev->dev, PHYS_OFFSET, 0, SZ_4G);
- if (ret)
- return ret;
ret = sun6i_csi_resource_request(sdev, pdev);
if (ret)
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
index ba5d07886607..ed863bf5ea80 100644
--- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
@@ -825,10 +825,6 @@ static int deinterlace_probe(struct platform_device *pdev)
return ret;
}
- ret = of_dma_configure(dev->dev, dev->dev->of_node, true);
- if (ret)
- return ret;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dev->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dev->base))
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 00e013b14703..3ea6913df176 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -128,7 +128,7 @@ config OMAP_GPMC_DEBUG
config TI_EMIF_SRAM
tristate "Texas Instruments EMIF SRAM driver"
- depends on SOC_AM33XX || SOC_AM43XX || (ARM && COMPILE_TEST)
+ depends on SOC_AM33XX || SOC_AM43XX || (ARM && CPU_V7 && COMPILE_TEST)
depends on SRAM
help
This driver is for the EMIF module available on Texas Instruments
@@ -191,8 +191,8 @@ config DA8XX_DDRCTL
config PL353_SMC
tristate "ARM PL35X Static Memory Controller(SMC) driver"
default y if ARM
- depends on ARM
- depends on ARM_AMBA || COMPILE_TEST
+ depends on ARM || COMPILE_TEST
+ depends on ARM_AMBA
help
This driver is for the ARM PL351/PL353 Static Memory
Controller(SMC) module.
diff --git a/drivers/memory/jz4780-nemc.c b/drivers/memory/jz4780-nemc.c
index 3ec5cb0fce1e..555f7ac3b7dd 100644
--- a/drivers/memory/jz4780-nemc.c
+++ b/drivers/memory/jz4780-nemc.c
@@ -291,6 +291,8 @@ static int jz4780_nemc_probe(struct platform_device *pdev)
nemc->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
/*
* The driver currently only uses the registers up to offset
@@ -304,9 +306,9 @@ static int jz4780_nemc_probe(struct platform_device *pdev)
}
nemc->base = devm_ioremap(dev, res->start, NEMC_REG_LEN);
- if (IS_ERR(nemc->base)) {
+ if (!nemc->base) {
dev_err(dev, "failed to get I/O memory\n");
- return PTR_ERR(nemc->base);
+ return -ENOMEM;
}
writel(0, nemc->base + NEMC_NFCSR);
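The check above matters because the two mapping helpers signal failure
differently: devm_ioremap() returns NULL, whereas devm_ioremap_resource()
returns an ERR_PTR(). A short sketch of the two conventions side by side
(illustrative only):

    void __iomem *base;

    base = devm_ioremap(dev, res->start, len);      /* NULL on failure */
    if (!base)
            return -ENOMEM;

    base = devm_ioremap_resource(dev, res);         /* ERR_PTR() on failure */
    if (IS_ERR(base))
            return PTR_ERR(base);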
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index 691e4c344cf8..ac350f8d1e20 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -268,6 +268,10 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt8183 = {
/* IPU0 | IPU1 | CCU */
};
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8192 = {
+ .config_port = mtk_smi_larb_config_port_gen2_general,
+};
+
static const struct of_device_id mtk_smi_larb_of_ids[] = {
{
.compatible = "mediatek,mt8167-smi-larb",
@@ -293,6 +297,10 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = {
.compatible = "mediatek,mt8183-smi-larb",
.data = &mtk_smi_larb_mt8183
},
+ {
+ .compatible = "mediatek,mt8192-smi-larb",
+ .data = &mtk_smi_larb_mt8192
+ },
{}
};
@@ -432,6 +440,13 @@ static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = {
F_MMU1_LARB(7),
};
+static const struct mtk_smi_common_plat mtk_smi_common_mt8192 = {
+ .gen = MTK_SMI_GEN2,
+ .has_gals = true,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(5) |
+ F_MMU1_LARB(6),
+};
+
static const struct of_device_id mtk_smi_common_of_ids[] = {
{
.compatible = "mediatek,mt8173-smi-common",
@@ -457,6 +472,10 @@ static const struct of_device_id mtk_smi_common_of_ids[] = {
.compatible = "mediatek,mt8183-smi-common",
.data = &mtk_smi_common_mt8183,
},
+ {
+ .compatible = "mediatek,mt8192-smi-common",
+ .data = &mtk_smi_common_mt8192,
+ },
{}
};
diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
index f2a33a1af836..8d36e221def1 100644
--- a/drivers/memory/renesas-rpc-if.c
+++ b/drivers/memory/renesas-rpc-if.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -204,18 +203,6 @@ int rpcif_sw_init(struct rpcif *rpc, struct device *dev)
}
EXPORT_SYMBOL(rpcif_sw_init);
-void rpcif_enable_rpm(struct rpcif *rpc)
-{
- pm_runtime_enable(rpc->dev);
-}
-EXPORT_SYMBOL(rpcif_enable_rpm);
-
-void rpcif_disable_rpm(struct rpcif *rpc)
-{
- pm_runtime_put_sync(rpc->dev);
-}
-EXPORT_SYMBOL(rpcif_disable_rpm);
-
void rpcif_hw_init(struct rpcif *rpc, bool hyperflash)
{
u32 dummy;
@@ -508,7 +495,8 @@ exit:
return ret;
err_out:
- ret = reset_control_reset(rpc->rstc);
+ if (reset_control_reset(rpc->rstc))
+ dev_err(rpc->dev, "Failed to reset HW\n");
rpcif_hw_init(rpc, rpc->bus_size == 2);
goto exit;
}
@@ -560,9 +548,11 @@ static int rpcif_probe(struct platform_device *pdev)
} else if (of_device_is_compatible(flash, "cfi-flash")) {
name = "rpc-if-hyperflash";
} else {
+ of_node_put(flash);
dev_warn(&pdev->dev, "unknown flash type\n");
return -ENODEV;
}
+ of_node_put(flash);
vdev = platform_device_alloc(name, pdev->id);
if (!vdev)
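The added of_node_put() calls follow the usual device-tree reference rule: a
node returned by an of_* lookup holds a reference that must be dropped on every
exit path once the node is no longer needed. A hedged sketch of the pattern
(the lookup and compatible string are illustrative):

    struct device_node *flash;

    flash = of_get_next_child(dev->of_node, NULL);  /* takes a reference */
    if (!flash)
            return -ENODEV;

    if (!of_device_is_compatible(flash, "jedec,spi-nor")) {
            of_node_put(flash);                     /* release on the error path */
            return -ENODEV;
    }

    /* ... use the node ... */
    of_node_put(flash);                             /* release when done */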
diff --git a/drivers/memory/tegra/Kconfig b/drivers/memory/tegra/Kconfig
index 9f0a96bf9ccc..ca7077a06f4c 100644
--- a/drivers/memory/tegra/Kconfig
+++ b/drivers/memory/tegra/Kconfig
@@ -3,14 +3,17 @@ config TEGRA_MC
bool "NVIDIA Tegra Memory Controller support"
default y
depends on ARCH_TEGRA
+ select INTERCONNECT
help
This driver supports the Memory Controller (MC) hardware found on
NVIDIA Tegra SoCs.
config TEGRA20_EMC
- bool "NVIDIA Tegra20 External Memory Controller driver"
+ tristate "NVIDIA Tegra20 External Memory Controller driver"
default y
- depends on ARCH_TEGRA_2x_SOC
+ depends on TEGRA_MC && ARCH_TEGRA_2x_SOC
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ select PM_DEVFREQ
help
This driver is for the External Memory Controller (EMC) found on
Tegra20 chips. The EMC controls the external DRAM on the board.
@@ -18,9 +21,10 @@ config TEGRA20_EMC
external memory.
config TEGRA30_EMC
- bool "NVIDIA Tegra30 External Memory Controller driver"
+ tristate "NVIDIA Tegra30 External Memory Controller driver"
default y
depends on TEGRA_MC && ARCH_TEGRA_3x_SOC
+ select PM_OPP
help
This driver is for the External Memory Controller (EMC) found on
Tegra30 chips. The EMC controls the external DRAM on the board.
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index ec8403557ed4..44064de962c2 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -6,6 +6,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -42,6 +43,54 @@ static const struct of_device_id tegra_mc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tegra_mc_of_match);
+static void tegra_mc_devm_action_put_device(void *data)
+{
+ struct tegra_mc *mc = data;
+
+ put_device(mc->dev);
+}
+
+/**
+ * devm_tegra_memory_controller_get() - get Tegra Memory Controller handle
+ * @dev: device pointer for the consumer device
+ *
+ * This function will search for the Memory Controller node in a device-tree
+ * and retrieve the Memory Controller handle.
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a struct tegra_mc.
+ */
+struct tegra_mc *devm_tegra_memory_controller_get(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct tegra_mc *mc;
+ int err;
+
+ np = of_parse_phandle(dev->of_node, "nvidia,memory-controller", 0);
+ if (!np)
+ return ERR_PTR(-ENOENT);
+
+ pdev = of_find_device_by_node(np);
+ of_node_put(np);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ mc = platform_get_drvdata(pdev);
+ if (!mc) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ err = devm_add_action(dev, tegra_mc_devm_action_put_device, mc);
+ if (err) {
+ put_device(mc->dev);
+ return ERR_PTR(err);
+ }
+
+ return mc;
+}
+EXPORT_SYMBOL_GPL(devm_tegra_memory_controller_get);
+
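As used later in this series (see the tegra124-emc.c hunk below), a consumer
simply calls the helper from probe and propagates the error pointer; an
-EPROBE_DEFER return is expected while the memory controller has not probed yet:

    emc->mc = devm_tegra_memory_controller_get(&pdev->dev);
    if (IS_ERR(emc->mc))
            return PTR_ERR(emc->mc);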
static int tegra_mc_block_dma_common(struct tegra_mc *mc,
const struct tegra_mc_reset *rst)
{
@@ -298,6 +347,7 @@ int tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate)
return 0;
}
+EXPORT_SYMBOL_GPL(tegra_mc_write_emem_configuration);
unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc)
{
@@ -309,6 +359,7 @@ unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc)
return dram_count;
}
+EXPORT_SYMBOL_GPL(tegra_mc_get_emem_device_count);
static int load_one_timing(struct tegra_mc *mc,
struct tegra_mc_timing *timing,
@@ -591,6 +642,101 @@ static __maybe_unused irqreturn_t tegra20_mc_irq(int irq, void *data)
return IRQ_HANDLED;
}
+/*
+ * The Memory Controller (MC) has a number of Memory Clients that issue
+ * memory bandwidth allocation requests to the MC interconnect provider. The MC
+ * provider aggregates the requests and then sends the aggregated request
+ * up to the External Memory Controller (EMC) interconnect provider which
+ * re-configures the hardware interface to External Memory (EMEM) in accordance
+ * with the required bandwidth. Each MC interconnect node represents an
+ * individual Memory Client.
+ *
+ * Memory interconnect topology:
+ *
+ * +----+
+ * +--------+ | |
+ * | TEXSRD +--->+ |
+ * +--------+ | |
+ * | | +-----+ +------+
+ * ... | MC +--->+ EMC +--->+ EMEM |
+ * | | +-----+ +------+
+ * +--------+ | |
+ * | DISP.. +--->+ |
+ * +--------+ | |
+ * +----+
+ */
+static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
+{
+ struct icc_node *node;
+ unsigned int i;
+ int err;
+
+ /* older device-trees don't have interconnect properties */
+ if (!device_property_present(mc->dev, "#interconnect-cells") ||
+ !mc->soc->icc_ops)
+ return 0;
+
+ mc->provider.dev = mc->dev;
+ mc->provider.data = &mc->provider;
+ mc->provider.set = mc->soc->icc_ops->set;
+ mc->provider.aggregate = mc->soc->icc_ops->aggregate;
+ mc->provider.xlate_extended = mc->soc->icc_ops->xlate_extended;
+
+ err = icc_provider_add(&mc->provider);
+ if (err)
+ return err;
+
+ /* create Memory Controller node */
+ node = icc_node_create(TEGRA_ICC_MC);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto del_provider;
+ }
+
+ node->name = "Memory Controller";
+ icc_node_add(node, &mc->provider);
+
+ /* link Memory Controller to External Memory Controller */
+ err = icc_link_create(node, TEGRA_ICC_EMC);
+ if (err)
+ goto remove_nodes;
+
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ /* create MC client node */
+ node = icc_node_create(mc->soc->clients[i].id);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto remove_nodes;
+ }
+
+ node->name = mc->soc->clients[i].name;
+ icc_node_add(node, &mc->provider);
+
+ /* link Memory Client to Memory Controller */
+ err = icc_link_create(node, TEGRA_ICC_MC);
+ if (err)
+ goto remove_nodes;
+ }
+
+ /*
+ * MC driver is registered too early, so early that generic driver
+ * syncing doesn't work for the MC. But it doesn't really matter
+ * since syncing works for the EMC drivers, hence we can sync the
+ * MC driver by ourselves and then EMC will complete syncing of
+ * the whole ICC state.
+ */
+ icc_sync_state(mc->dev);
+
+ return 0;
+
+remove_nodes:
+ icc_nodes_remove(&mc->provider);
+del_provider:
+ icc_provider_del(&mc->provider);
+
+ return err;
+}
+
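With the MC and EMC providers registered, a memory client requests bandwidth
through the generic interconnect API; a hedged consumer sketch (the "dma-mem"
path name is the conventional one for a device's DMA master port, and the
numbers are purely illustrative):

    struct icc_path *path;
    int err;

    path = of_icc_get(dev, "dma-mem");
    if (IS_ERR(path))
            return PTR_ERR(path);

    /* 1 GB/s average, 2 GB/s peak; the provider aggregates all clients. */
    err = icc_set_bw(path, MBps_to_icc(1000), MBps_to_icc(2000));
    if (err)
            dev_err(dev, "interconnect bandwidth request failed: %d\n", err);

    /* keep the path for later re-requests; icc_put(path) on teardown */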
static int tegra_mc_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -659,10 +805,8 @@ static int tegra_mc_probe(struct platform_device *pdev)
}
mc->irq = platform_get_irq(pdev, 0);
- if (mc->irq < 0) {
- dev_err(&pdev->dev, "interrupt not specified\n");
+ if (mc->irq < 0)
return mc->irq;
- }
WARN(!mc->soc->client_id_mask, "missing client ID mask for this SoC\n");
@@ -681,6 +825,11 @@ static int tegra_mc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to register reset controller: %d\n",
err);
+ err = tegra_mc_interconnect_setup(mc);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to initialize interconnect: %d\n",
+ err);
+
if (IS_ENABLED(CONFIG_TEGRA_IOMMU_SMMU) && mc->soc->smmu) {
mc->smmu = tegra_smmu_probe(&pdev->dev, mc->soc->smmu, mc);
if (IS_ERR(mc->smmu)) {
diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h
index afa3ba45c9e6..33e40d600592 100644
--- a/drivers/memory/tegra/mc.h
+++ b/drivers/memory/tegra/mc.h
@@ -78,6 +78,20 @@
#define MC_TIMING_UPDATE BIT(0)
+static inline u32 tegra_mc_scale_percents(u64 val, unsigned int percents)
+{
+ val = val * percents;
+ do_div(val, 100);
+
+ return min_t(u64, val, U32_MAX);
+}
+
+static inline struct tegra_mc *
+icc_provider_to_tegra_mc(struct icc_provider *provider)
+{
+ return container_of(provider, struct tegra_mc, provider);
+}
+
static inline u32 mc_readl(struct tegra_mc *mc, unsigned long offset)
{
return readl_relaxed(mc->regs + offset);
@@ -115,4 +129,12 @@ extern const struct tegra_mc_soc tegra132_mc_soc;
extern const struct tegra_mc_soc tegra210_mc_soc;
#endif
+/*
+ * These IDs are for internal use of Tegra ICC drivers. The ID numbers are
+ * chosen such that they don't conflict with the device-tree ICC node IDs.
+ */
+#define TEGRA_ICC_MC 1000
+#define TEGRA_ICC_EMC 1001
+#define TEGRA_ICC_EMEM 1002
+
#endif /* MEMORY_TEGRA_MC_H */
diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c
index 48ef01c3ff90..ed376ba2d2fe 100644
--- a/drivers/memory/tegra/tegra114.c
+++ b/drivers/memory/tegra/tegra114.c
@@ -15,6 +15,12 @@ static const struct tegra_mc_client tegra114_mc_clients[] = {
.id = 0x00,
.name = "ptcr",
.swgroup = TEGRA_SWGROUP_PTC,
+ .la = {
+ .reg = 0x34c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0,
+ },
}, {
.id = 0x01,
.name = "display0a",
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index 76ace42a688a..ee8ee39e98ed 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -1177,10 +1177,8 @@ static void emc_debugfs_init(struct device *dev, struct tegra_emc *emc)
static int tegra_emc_probe(struct platform_device *pdev)
{
- struct platform_device *mc;
struct device_node *np;
struct tegra_emc *emc;
- struct resource *res;
u32 ram_code;
int err;
@@ -1190,25 +1188,13 @@ static int tegra_emc_probe(struct platform_device *pdev)
emc->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- emc->regs = devm_ioremap_resource(&pdev->dev, res);
+ emc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(emc->regs))
return PTR_ERR(emc->regs);
- np = of_parse_phandle(pdev->dev.of_node, "nvidia,memory-controller", 0);
- if (!np) {
- dev_err(&pdev->dev, "could not get memory controller\n");
- return -ENOENT;
- }
-
- mc = of_find_device_by_node(np);
- of_node_put(np);
- if (!mc)
- return -ENOENT;
-
- emc->mc = platform_get_drvdata(mc);
- if (!emc->mc)
- return -EPROBE_DEFER;
+ emc->mc = devm_tegra_memory_controller_get(&pdev->dev);
+ if (IS_ERR(emc->mc))
+ return PTR_ERR(emc->mc);
ram_code = tegra_read_ram_code();
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
index 0cede24479bf..e2389573d3c0 100644
--- a/drivers/memory/tegra/tegra124.c
+++ b/drivers/memory/tegra/tegra124.c
@@ -15,6 +15,12 @@ static const struct tegra_mc_client tegra124_mc_clients[] = {
.id = 0x00,
.name = "ptcr",
.swgroup = TEGRA_SWGROUP_PTC,
+ .la = {
+ .reg = 0x34c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0,
+ },
}, {
.id = 0x01,
.name = "display0a",
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index 027f46287dbf..686aaf477d8a 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -8,19 +8,27 @@
#include <linux/clk.h>
#include <linux/clk/tegra.h>
#include <linux/debugfs.h>
+#include <linux/devfreq.h>
#include <linux/err.h>
+#include <linux/interconnect-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/types.h>
+#include <soc/tegra/common.h>
#include <soc/tegra/fuse.h>
+#include "mc.h"
+
#define EMC_INTSTATUS 0x000
#define EMC_INTMASK 0x004
#define EMC_DBG 0x008
@@ -62,6 +70,11 @@
#define EMC_ODT_READ 0x0b4
#define EMC_FBIO_CFG5 0x104
#define EMC_FBIO_CFG6 0x114
+#define EMC_STAT_CONTROL 0x160
+#define EMC_STAT_LLMC_CONTROL 0x178
+#define EMC_STAT_PWR_CLOCK_LIMIT 0x198
+#define EMC_STAT_PWR_CLOCKS 0x19c
+#define EMC_STAT_PWR_COUNT 0x1a0
#define EMC_AUTO_CAL_INTERVAL 0x2a8
#define EMC_CFG_2 0x2b8
#define EMC_CFG_DIG_DLL 0x2bc
@@ -88,6 +101,12 @@
#define EMC_DBG_READ_DQM_CTRL BIT(9)
#define EMC_DBG_CFG_PRIORITY BIT(24)
+#define EMC_FBIO_CFG5_DRAM_WIDTH_X16 BIT(4)
+
+#define EMC_PWR_GATHER_CLEAR (1 << 8)
+#define EMC_PWR_GATHER_DISABLE (2 << 8)
+#define EMC_PWR_GATHER_ENABLE (3 << 8)
+
static const u16 emc_timing_registers[] = {
EMC_RC,
EMC_RFC,
@@ -142,11 +161,26 @@ struct emc_timing {
u32 data[ARRAY_SIZE(emc_timing_registers)];
};
+enum emc_rate_request_type {
+ EMC_RATE_DEVFREQ,
+ EMC_RATE_DEBUG,
+ EMC_RATE_ICC,
+ EMC_RATE_TYPE_MAX,
+};
+
+struct emc_rate_request {
+ unsigned long min_rate;
+ unsigned long max_rate;
+};
+
struct tegra_emc {
struct device *dev;
+ struct tegra_mc *mc;
+ struct icc_provider provider;
struct notifier_block clk_nb;
struct clk *clk;
void __iomem *regs;
+ unsigned int dram_bus_width;
struct emc_timing *timings;
unsigned int num_timings;
@@ -156,6 +190,17 @@ struct tegra_emc {
unsigned long min_rate;
unsigned long max_rate;
} debugfs;
+
+ /*
+ * There are multiple sources in the EMC driver which could request
+ * a min/max clock rate; these rates are contained in this array.
+ */
+ struct emc_rate_request requested_rate[EMC_RATE_TYPE_MAX];
+
+ /* protect shared rate-change code path */
+ struct mutex rate_lock;
+
+ struct devfreq_simple_ondemand_data ondemand_data;
};
static irqreturn_t tegra_emc_isr(int irq, void *data)
@@ -383,6 +428,11 @@ tegra_emc_find_node_by_ram_code(struct device *dev)
u32 value, ram_code;
int err;
+ if (of_get_child_count(dev->of_node) == 0) {
+ dev_info(dev, "device-tree doesn't have memory timings\n");
+ return NULL;
+ }
+
if (!of_property_read_bool(dev->of_node, "nvidia,use-ram-code"))
return of_node_get(dev->of_node);
@@ -408,7 +458,7 @@ tegra_emc_find_node_by_ram_code(struct device *dev)
static int emc_setup_hw(struct tegra_emc *emc)
{
u32 intmask = EMC_REFRESH_OVERFLOW_INT;
- u32 emc_cfg, emc_dbg;
+ u32 emc_cfg, emc_dbg, emc_fbio;
emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2);
@@ -439,6 +489,15 @@ static int emc_setup_hw(struct tegra_emc *emc)
emc_dbg &= ~EMC_DBG_FORCE_UPDATE;
writel_relaxed(emc_dbg, emc->regs + EMC_DBG);
+ emc_fbio = readl_relaxed(emc->regs + EMC_FBIO_CFG5);
+
+ if (emc_fbio & EMC_FBIO_CFG5_DRAM_WIDTH_X16)
+ emc->dram_bus_width = 16;
+ else
+ emc->dram_bus_width = 32;
+
+ dev_info(emc->dev, "%ubit DRAM bus\n", emc->dram_bus_width);
+
return 0;
}
@@ -451,6 +510,9 @@ static long emc_round_rate(unsigned long rate,
struct tegra_emc *emc = arg;
unsigned int i;
+ if (!emc->num_timings)
+ return clk_get_rate(emc->clk);
+
min_rate = min(min_rate, emc->timings[emc->num_timings - 1].rate);
for (i = 0; i < emc->num_timings; i++) {
@@ -480,6 +542,83 @@ static long emc_round_rate(unsigned long rate,
return timing->rate;
}
+static void tegra_emc_rate_requests_init(struct tegra_emc *emc)
+{
+ unsigned int i;
+
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++) {
+ emc->requested_rate[i].min_rate = 0;
+ emc->requested_rate[i].max_rate = ULONG_MAX;
+ }
+}
+
+static int emc_request_rate(struct tegra_emc *emc,
+ unsigned long new_min_rate,
+ unsigned long new_max_rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = emc->requested_rate;
+ unsigned long min_rate = 0, max_rate = ULONG_MAX;
+ unsigned int i;
+ int err;
+
+ /* select minimum and maximum rates among the requested rates */
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++, req++) {
+ if (i == type) {
+ min_rate = max(new_min_rate, min_rate);
+ max_rate = min(new_max_rate, max_rate);
+ } else {
+ min_rate = max(req->min_rate, min_rate);
+ max_rate = min(req->max_rate, max_rate);
+ }
+ }
+
+ if (min_rate > max_rate) {
+ dev_err_ratelimited(emc->dev, "%s: type %u: out of range: %lu %lu\n",
+ __func__, type, min_rate, max_rate);
+ return -ERANGE;
+ }
+
+ /*
+ * EMC rate-changes should go via OPP API because it manages voltage
+ * changes.
+ */
+ err = dev_pm_opp_set_rate(emc->dev, min_rate);
+ if (err)
+ return err;
+
+ emc->requested_rate[type].min_rate = new_min_rate;
+ emc->requested_rate[type].max_rate = new_max_rate;
+
+ return 0;
+}
+
+static int emc_set_min_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, rate, req->max_rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
+static int emc_set_max_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, req->min_rate, rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
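To make the aggregation concrete with illustrative numbers: if devfreq has
requested a minimum of 300 MHz, the interconnect provider a minimum of 400 MHz,
and debugfs has capped the maximum at 600 MHz, the combined window is
[400 MHz, 600 MHz] and dev_pm_opp_set_rate() is called with 400 MHz; a request
that pushed the minimum above the 600 MHz cap would fail with -ERANGE.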
/*
* debugfs interface
*
@@ -563,7 +702,7 @@ static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
if (!tegra_emc_validate_rate(emc, rate))
return -EINVAL;
- err = clk_set_min_rate(emc->clk, rate);
+ err = emc_set_min_rate(emc, rate, EMC_RATE_DEBUG);
if (err < 0)
return err;
@@ -593,7 +732,7 @@ static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
if (!tegra_emc_validate_rate(emc, rate))
return -EINVAL;
- err = clk_set_max_rate(emc->clk, rate);
+ err = emc_set_max_rate(emc, rate, EMC_RATE_DEBUG);
if (err < 0)
return err;
@@ -650,47 +789,330 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc)
emc, &tegra_emc_debug_max_rate_fops);
}
+static inline struct tegra_emc *
+to_tegra_emc_provider(struct icc_provider *provider)
+{
+ return container_of(provider, struct tegra_emc, provider);
+}
+
+static struct icc_node_data *
+emc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct icc_provider *provider = data;
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ /* External Memory is the only possible ICC route */
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ if (node->id != TEGRA_ICC_EMEM)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * SRC and DST nodes should have matching TAG in order to have
+ * it set by default for a requested path.
+ */
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ ndata->node = node;
+
+ return ndata;
+ }
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+static int emc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct tegra_emc *emc = to_tegra_emc_provider(dst->provider);
+ unsigned long long peak_bw = icc_units_to_bps(dst->peak_bw);
+ unsigned long long avg_bw = icc_units_to_bps(dst->avg_bw);
+ unsigned long long rate = max(avg_bw, peak_bw);
+ unsigned int dram_data_bus_width_bytes;
+ int err;
+
+ /*
+ * Tegra20 EMC runs on x2 clock rate of SDRAM bus because DDR data
+ * is sampled on both clock edges. This means that EMC clock rate
+ * equals to the peak data-rate.
+ */
+ dram_data_bus_width_bytes = emc->dram_bus_width / 8;
+ do_div(rate, dram_data_bus_width_bytes);
+ rate = min_t(u64, rate, U32_MAX);
+
+ err = emc_set_min_rate(emc, rate, EMC_RATE_ICC);
+ if (err)
+ return err;
+
+ return 0;
+}
+
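Worked through with illustrative numbers: a 2 GB/s peak request arrives as
2,000,000 kBps, icc_units_to_bps() turns that into 2e9 bytes per second, and on
a 32-bit (4-byte) DRAM bus the required EMC clock is 2e9 / 4 = 500 MHz, which
is then applied as the interconnect minimum-rate request.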
+static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+{
+ const struct tegra_mc_soc *soc;
+ struct icc_node *node;
+ int err;
+
+ emc->mc = devm_tegra_memory_controller_get(emc->dev);
+ if (IS_ERR(emc->mc))
+ return PTR_ERR(emc->mc);
+
+ soc = emc->mc->soc;
+
+ emc->provider.dev = emc->dev;
+ emc->provider.set = emc_icc_set;
+ emc->provider.data = &emc->provider;
+ emc->provider.aggregate = soc->icc_ops->aggregate;
+ emc->provider.xlate_extended = emc_of_icc_xlate_extended;
+
+ err = icc_provider_add(&emc->provider);
+ if (err)
+ goto err_msg;
+
+ /* create External Memory Controller node */
+ node = icc_node_create(TEGRA_ICC_EMC);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto del_provider;
+ }
+
+ node->name = "External Memory Controller";
+ icc_node_add(node, &emc->provider);
+
+ /* link External Memory Controller to External Memory (DRAM) */
+ err = icc_link_create(node, TEGRA_ICC_EMEM);
+ if (err)
+ goto remove_nodes;
+
+ /* create External Memory node */
+ node = icc_node_create(TEGRA_ICC_EMEM);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto remove_nodes;
+ }
+
+ node->name = "External Memory (DRAM)";
+ icc_node_add(node, &emc->provider);
+
+ return 0;
+
+remove_nodes:
+ icc_nodes_remove(&emc->provider);
+del_provider:
+ icc_provider_del(&emc->provider);
+err_msg:
+ dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
+
+ return err;
+}
+
+static int tegra_emc_opp_table_init(struct tegra_emc *emc)
+{
+ u32 hw_version = BIT(tegra_sku_info.soc_process_id);
+ struct opp_table *clk_opp_table, *hw_opp_table;
+ int err;
+
+ clk_opp_table = dev_pm_opp_set_clkname(emc->dev, NULL);
+ err = PTR_ERR_OR_ZERO(clk_opp_table);
+ if (err) {
+ dev_err(emc->dev, "failed to set OPP clk: %d\n", err);
+ return err;
+ }
+
+ hw_opp_table = dev_pm_opp_set_supported_hw(emc->dev, &hw_version, 1);
+ err = PTR_ERR_OR_ZERO(hw_opp_table);
+ if (err) {
+ dev_err(emc->dev, "failed to set OPP supported HW: %d\n", err);
+ goto put_clk_table;
+ }
+
+ err = dev_pm_opp_of_add_table(emc->dev);
+ if (err) {
+ if (err == -ENODEV)
+ dev_err(emc->dev, "OPP table not found, please update your device tree\n");
+ else
+ dev_err(emc->dev, "failed to add OPP table: %d\n", err);
+
+ goto put_hw_table;
+ }
+
+ dev_info(emc->dev, "OPP HW ver. 0x%x, current clock rate %lu MHz\n",
+ hw_version, clk_get_rate(emc->clk) / 1000000);
+
+ /* first dummy rate-set initializes voltage state */
+ err = dev_pm_opp_set_rate(emc->dev, clk_get_rate(emc->clk));
+ if (err) {
+ dev_err(emc->dev, "failed to initialize OPP clock: %d\n", err);
+ goto remove_table;
+ }
+
+ return 0;
+
+remove_table:
+ dev_pm_opp_of_remove_table(emc->dev);
+put_hw_table:
+ dev_pm_opp_put_supported_hw(hw_opp_table);
+put_clk_table:
+ dev_pm_opp_put_clkname(clk_opp_table);
+
+ return err;
+}
+
+static void devm_tegra_emc_unset_callback(void *data)
+{
+ tegra20_clk_set_emc_round_callback(NULL, NULL);
+}
+
+static void devm_tegra_emc_unreg_clk_notifier(void *data)
+{
+ struct tegra_emc *emc = data;
+
+ clk_notifier_unregister(emc->clk, &emc->clk_nb);
+}
+
+static int tegra_emc_init_clk(struct tegra_emc *emc)
+{
+ int err;
+
+ tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
+
+ err = devm_add_action_or_reset(emc->dev, devm_tegra_emc_unset_callback,
+ NULL);
+ if (err)
+ return err;
+
+ emc->clk = devm_clk_get(emc->dev, NULL);
+ if (IS_ERR(emc->clk)) {
+ dev_err(emc->dev, "failed to get EMC clock: %pe\n", emc->clk);
+ return PTR_ERR(emc->clk);
+ }
+
+ err = clk_notifier_register(emc->clk, &emc->clk_nb);
+ if (err) {
+ dev_err(emc->dev, "failed to register clk notifier: %d\n", err);
+ return err;
+ }
+
+ err = devm_add_action_or_reset(emc->dev,
+ devm_tegra_emc_unreg_clk_notifier, emc);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra_emc_devfreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct tegra_emc *emc = dev_get_drvdata(dev);
+ struct dev_pm_opp *opp;
+ unsigned long rate;
+
+ opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(opp)) {
+ dev_err(dev, "failed to find opp for %lu Hz\n", *freq);
+ return PTR_ERR(opp);
+ }
+
+ rate = dev_pm_opp_get_freq(opp);
+ dev_pm_opp_put(opp);
+
+ return emc_set_min_rate(emc, rate, EMC_RATE_DEVFREQ);
+}
+
+static int tegra_emc_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct tegra_emc *emc = dev_get_drvdata(dev);
+
+ /* freeze counters */
+ writel_relaxed(EMC_PWR_GATHER_DISABLE, emc->regs + EMC_STAT_CONTROL);
+
+ /*
+ * busy_time: number of clocks EMC request was accepted
+ * total_time: number of clocks PWR_GATHER control was set to ENABLE
+ */
+ stat->busy_time = readl_relaxed(emc->regs + EMC_STAT_PWR_COUNT);
+ stat->total_time = readl_relaxed(emc->regs + EMC_STAT_PWR_CLOCKS);
+ stat->current_frequency = clk_get_rate(emc->clk);
+
+ /* clear counters and restart */
+ writel_relaxed(EMC_PWR_GATHER_CLEAR, emc->regs + EMC_STAT_CONTROL);
+ writel_relaxed(EMC_PWR_GATHER_ENABLE, emc->regs + EMC_STAT_CONTROL);
+
+ return 0;
+}
+
+static struct devfreq_dev_profile tegra_emc_devfreq_profile = {
+ .polling_ms = 30,
+ .target = tegra_emc_devfreq_target,
+ .get_dev_status = tegra_emc_devfreq_get_dev_status,
+};
+
+static int tegra_emc_devfreq_init(struct tegra_emc *emc)
+{
+ struct devfreq *devfreq;
+
+ /*
+ * PWR_COUNT is 1/2 of PWR_CLOCKS at max, and thus, the up-threshold
+ * should be less than 50. Secondly, multiple active memory clients
+ * may cause over 20% of lost clock cycles due to stalls caused by
+ * competing memory accesses. This means that the threshold should be
+ * set to less than 30 in order to have a properly working governor.
+ */
+ emc->ondemand_data.upthreshold = 20;
+
+ /*
+ * Reset the statistics-gathering state, select global bandwidth for the
+ * statistics collection mode and set clocks counter saturation
+ * limit to maximum.
+ */
+ writel_relaxed(0x00000000, emc->regs + EMC_STAT_CONTROL);
+ writel_relaxed(0x00000000, emc->regs + EMC_STAT_LLMC_CONTROL);
+ writel_relaxed(0xffffffff, emc->regs + EMC_STAT_PWR_CLOCK_LIMIT);
+
+ devfreq = devm_devfreq_add_device(emc->dev, &tegra_emc_devfreq_profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ &emc->ondemand_data);
+ if (IS_ERR(devfreq)) {
+ dev_err(emc->dev, "failed to initialize devfreq: %pe", devfreq);
+ return PTR_ERR(devfreq);
+ }
+
+ return 0;
+}
+
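For example, with illustrative numbers: if one polling interval reports
PWR_COUNT = 3,000 busy clocks out of PWR_CLOCKS = 10,000 gathered clocks, the
load is 30%, which exceeds the 20% up-threshold, so the simple_ondemand
governor raises the EMC frequency; loads below the threshold let it scale the
clock back down through lower OPPs.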
static int tegra_emc_probe(struct platform_device *pdev)
{
struct device_node *np;
struct tegra_emc *emc;
- struct resource *res;
int irq, err;
- /* driver has nothing to do in a case of memory timing absence */
- if (of_get_child_count(pdev->dev.of_node) == 0) {
- dev_info(&pdev->dev,
- "EMC device tree node doesn't have memory timings\n");
- return 0;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "interrupt not specified\n");
dev_err(&pdev->dev, "please update your device tree\n");
return irq;
}
- np = tegra_emc_find_node_by_ram_code(&pdev->dev);
- if (!np)
- return -EINVAL;
-
emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
- if (!emc) {
- of_node_put(np);
+ if (!emc)
return -ENOMEM;
- }
+ mutex_init(&emc->rate_lock);
emc->clk_nb.notifier_call = tegra_emc_clk_change_notify;
emc->dev = &pdev->dev;
- err = tegra_emc_load_timings_from_dt(emc, np);
- of_node_put(np);
- if (err)
- return err;
+ np = tegra_emc_find_node_by_ram_code(&pdev->dev);
+ if (np) {
+ err = tegra_emc_load_timings_from_dt(emc, np);
+ of_node_put(np);
+ if (err)
+ return err;
+ }
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- emc->regs = devm_ioremap_resource(&pdev->dev, res);
+ emc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(emc->regs))
return PTR_ERR(emc->regs);
@@ -701,41 +1123,39 @@ static int tegra_emc_probe(struct platform_device *pdev)
err = devm_request_irq(&pdev->dev, irq, tegra_emc_isr, 0,
dev_name(&pdev->dev), emc);
if (err) {
- dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", irq, err);
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
return err;
}
- tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
-
- emc->clk = devm_clk_get(&pdev->dev, "emc");
- if (IS_ERR(emc->clk)) {
- err = PTR_ERR(emc->clk);
- dev_err(&pdev->dev, "failed to get emc clock: %d\n", err);
- goto unset_cb;
- }
+ err = tegra_emc_init_clk(emc);
+ if (err)
+ return err;
- err = clk_notifier_register(emc->clk, &emc->clk_nb);
- if (err) {
- dev_err(&pdev->dev, "failed to register clk notifier: %d\n",
- err);
- goto unset_cb;
- }
+ err = tegra_emc_opp_table_init(emc);
+ if (err)
+ return err;
platform_set_drvdata(pdev, emc);
+ tegra_emc_rate_requests_init(emc);
tegra_emc_debugfs_init(emc);
+ tegra_emc_interconnect_init(emc);
+ tegra_emc_devfreq_init(emc);
- return 0;
-
-unset_cb:
- tegra20_clk_set_emc_round_callback(NULL, NULL);
+ /*
+ * Don't allow the kernel module to be unloaded. Unloading adds some
+ * extra complexity which isn't really worth the effort in the case
+ * of this driver.
+ */
+ try_module_get(THIS_MODULE);
- return err;
+ return 0;
}
static const struct of_device_id tegra_emc_of_match[] = {
{ .compatible = "nvidia,tegra20-emc", },
{},
};
+MODULE_DEVICE_TABLE(of, tegra_emc_of_match);
static struct platform_driver tegra_emc_driver = {
.probe = tegra_emc_probe,
@@ -743,11 +1163,11 @@ static struct platform_driver tegra_emc_driver = {
.name = "tegra20-emc",
.of_match_table = tegra_emc_of_match,
.suppress_bind_attrs = true,
+ .sync_state = icc_sync_state,
},
};
+module_platform_driver(tegra_emc_driver);
-static int __init tegra_emc_init(void)
-{
- return platform_driver_register(&tegra_emc_driver);
-}
-subsys_initcall(tegra_emc_init);
+MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra20 EMC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/tegra20.c b/drivers/memory/tegra/tegra20.c
index a8098bff91d9..29ecf02805a0 100644
--- a/drivers/memory/tegra/tegra20.c
+++ b/drivers/memory/tegra/tegra20.c
@@ -3,6 +3,10 @@
* Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
*/
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
#include <dt-bindings/memory/tegra20-mc.h>
#include "mc.h"
@@ -280,6 +284,78 @@ static const struct tegra_mc_reset_ops tegra20_mc_reset_ops = {
.reset_status = tegra20_mc_reset_status,
};
+static int tegra20_mc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ /*
+ * It should be possible to tune arbitration knobs here, but the
+ * default values are known to work well on all devices. Hence
+ * nothing to do here so far.
+ */
+ return 0;
+}
+
+static int tegra20_mc_icc_aggreate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ /*
+ * ISO clients need to reserve extra bandwidth up-front because
+ * there could be high bandwidth pressure during initial filling
+ * of the client's FIFO buffers. Secondly, we need to take into
+ * account inefficiencies of the memory subsystem.
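+ *
+ * For example, an ISO client's 100 MB/s peak request is accounted
+ * here as a 300 MB/s peak (scaled to 300%).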
+ */
+ if (tag & TEGRA_MC_ICC_TAG_ISO)
+ peak_bw = tegra_mc_scale_percents(peak_bw, 300);
+
+ *agg_avg += avg_bw;
+ *agg_peak = max(*agg_peak, peak_bw);
+
+ return 0;
+}
+
+static struct icc_node_data *
+tegra20_mc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
+ unsigned int i, idx = spec->args[0];
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ list_for_each_entry(node, &mc->provider.nodes, node_list) {
+ if (node->id != idx)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ ndata->node = node;
+
+ /* these clients are isochronous by default */
+ if (strstarts(node->name, "display") ||
+ strstarts(node->name, "vi"))
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ else
+ ndata->tag = TEGRA_MC_ICC_TAG_DEFAULT;
+
+ return ndata;
+ }
+
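+ /* the client ID is valid but its ICC node isn't registered yet, so defer */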
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ if (mc->soc->clients[i].id == idx)
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ dev_err(mc->dev, "invalid ICC client ID %u\n", idx);
+
+ return ERR_PTR(-EINVAL);
+}
+
+static const struct tegra_mc_icc_ops tegra20_mc_icc_ops = {
+ .xlate_extended = tegra20_mc_of_icc_xlate_extended,
+ .aggregate = tegra20_mc_icc_aggreate,
+ .set = tegra20_mc_icc_set,
+};
+
const struct tegra_mc_soc tegra20_mc_soc = {
.clients = tegra20_mc_clients,
.num_clients = ARRAY_SIZE(tegra20_mc_clients),
@@ -290,4 +366,5 @@ const struct tegra_mc_soc tegra20_mc_soc = {
.reset_ops = &tegra20_mc_reset_ops,
.resets = tegra20_mc_resets,
.num_resets = ARRAY_SIZE(tegra20_mc_resets),
+ .icc_ops = &tegra20_mc_icc_ops,
};
diff --git a/drivers/memory/tegra/tegra210-emc-core.c b/drivers/memory/tegra/tegra210-emc-core.c
index cdd663ba4733..5f224796e32e 100644
--- a/drivers/memory/tegra/tegra210-emc-core.c
+++ b/drivers/memory/tegra/tegra210-emc-core.c
@@ -1828,7 +1828,6 @@ static int tegra210_emc_probe(struct platform_device *pdev)
{
struct thermal_cooling_device *cd;
unsigned long current_rate;
- struct platform_device *mc;
struct tegra210_emc *emc;
struct device_node *np;
unsigned int i;
@@ -1846,35 +1845,19 @@ static int tegra210_emc_probe(struct platform_device *pdev)
spin_lock_init(&emc->lock);
emc->dev = &pdev->dev;
- np = of_parse_phandle(pdev->dev.of_node, "nvidia,memory-controller", 0);
- if (!np) {
- dev_err(&pdev->dev, "could not get memory controller\n");
- return -ENOENT;
- }
-
- mc = of_find_device_by_node(np);
- of_node_put(np);
- if (!mc)
- return -ENOENT;
-
- emc->mc = platform_get_drvdata(mc);
- if (!emc->mc) {
- put_device(&mc->dev);
- return -EPROBE_DEFER;
- }
+ emc->mc = devm_tegra_memory_controller_get(&pdev->dev);
+ if (IS_ERR(emc->mc))
+ return PTR_ERR(emc->mc);
emc->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(emc->regs)) {
- err = PTR_ERR(emc->regs);
- goto put_mc;
- }
+ if (IS_ERR(emc->regs))
+ return PTR_ERR(emc->regs);
for (i = 0; i < 2; i++) {
emc->channel[i] = devm_platform_ioremap_resource(pdev, 1 + i);
- if (IS_ERR(emc->channel[i])) {
- err = PTR_ERR(emc->channel[i]);
- goto put_mc;
- }
+ if (IS_ERR(emc->channel[i]))
+ return PTR_ERR(emc->channel[i]);
+
}
tegra210_emc_detect(emc);
@@ -1884,7 +1867,7 @@ static int tegra210_emc_probe(struct platform_device *pdev)
err = of_reserved_mem_device_init_by_name(emc->dev, np, "nominal");
if (err < 0) {
dev_err(emc->dev, "failed to get nominal EMC table: %d\n", err);
- goto put_mc;
+ return err;
}
err = of_reserved_mem_device_init_by_name(emc->dev, np, "derated");
@@ -2015,8 +1998,7 @@ detach:
tegra210_clk_emc_detach(emc->clk);
release:
of_reserved_mem_device_release(emc->dev);
-put_mc:
- put_device(emc->mc->dev);
+
return err;
}
@@ -2027,7 +2009,6 @@ static int tegra210_emc_remove(struct platform_device *pdev)
debugfs_remove_recursive(emc->debugfs.root);
tegra210_clk_emc_detach(emc->clk);
of_reserved_mem_device_release(emc->dev);
- put_device(emc->mc->dev);
return 0;
}
diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c
index 7fb8b5438bf4..b3bbc5a05ba1 100644
--- a/drivers/memory/tegra/tegra210.c
+++ b/drivers/memory/tegra/tegra210.c
@@ -24,7 +24,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2e8,
.shift = 0,
.mask = 0xff,
- .def = 0xc2,
+ .def = 0x1e,
},
}, {
.id = 0x02,
@@ -38,7 +38,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2f4,
.shift = 0,
.mask = 0xff,
- .def = 0xc6,
+ .def = 0x1e,
},
}, {
.id = 0x03,
@@ -52,7 +52,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2e8,
.shift = 16,
.mask = 0xff,
- .def = 0x50,
+ .def = 0x1e,
},
}, {
.id = 0x04,
@@ -66,7 +66,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2f4,
.shift = 16,
.mask = 0xff,
- .def = 0x50,
+ .def = 0x1e,
},
}, {
.id = 0x05,
@@ -80,7 +80,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2ec,
.shift = 0,
.mask = 0xff,
- .def = 0x50,
+ .def = 0x1e,
},
}, {
.id = 0x06,
@@ -94,7 +94,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2f8,
.shift = 0,
.mask = 0xff,
- .def = 0x50,
+ .def = 0x1e,
},
}, {
.id = 0x0e,
@@ -108,7 +108,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2e0,
.shift = 0,
.mask = 0xff,
- .def = 0x13,
+ .def = 0x2e,
},
}, {
.id = 0x0f,
@@ -136,7 +136,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2f0,
.shift = 0,
.mask = 0xff,
- .def = 0x50,
+ .def = 0x1e,
},
}, {
.id = 0x11,
@@ -150,7 +150,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2fc,
.shift = 0,
.mask = 0xff,
- .def = 0x50,
+ .def = 0x1e,
},
}, {
.id = 0x15,
@@ -380,7 +380,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x350,
.shift = 16,
.mask = 0xff,
- .def = 0x65,
+ .def = 0x80,
},
}, {
.id = 0x44,
@@ -620,7 +620,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2f0,
.shift = 16,
.mask = 0xff,
- .def = 0x50,
+ .def = 0x1e,
},
}, {
.id = 0x60,
@@ -648,7 +648,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x3bc,
.shift = 0,
.mask = 0xff,
- .def = 0x49,
+ .def = 0x5a,
},
}, {
.id = 0x62,
@@ -676,7 +676,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x3c4,
.shift = 0,
.mask = 0xff,
- .def = 0x49,
+ .def = 0x5a,
},
}, {
.id = 0x64,
@@ -897,7 +897,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.bit = 1,
},
.la = {
- .reg = 0xb98,
+ .reg = 0x3e0,
.shift = 16,
.mask = 0xff,
.def = 0x80,
@@ -956,7 +956,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x3ec,
.shift = 16,
.mask = 0xff,
- .def = 0xff,
+ .def = 0x80,
},
}, {
.id = 0x86,
@@ -1020,35 +1020,45 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
};
static const struct tegra_smmu_swgroup tegra210_swgroups[] = {
- { .name = "dc", .swgroup = TEGRA_SWGROUP_DC, .reg = 0x240 },
- { .name = "dcb", .swgroup = TEGRA_SWGROUP_DCB, .reg = 0x244 },
{ .name = "afi", .swgroup = TEGRA_SWGROUP_AFI, .reg = 0x238 },
{ .name = "avpc", .swgroup = TEGRA_SWGROUP_AVPC, .reg = 0x23c },
- { .name = "hda", .swgroup = TEGRA_SWGROUP_HDA, .reg = 0x254 },
+ { .name = "dc", .swgroup = TEGRA_SWGROUP_DC, .reg = 0x240 },
+ { .name = "dcb", .swgroup = TEGRA_SWGROUP_DCB, .reg = 0x244 },
{ .name = "hc", .swgroup = TEGRA_SWGROUP_HC, .reg = 0x250 },
+ { .name = "hda", .swgroup = TEGRA_SWGROUP_HDA, .reg = 0x254 },
+ { .name = "isp2", .swgroup = TEGRA_SWGROUP_ISP2, .reg = 0x258 },
{ .name = "nvenc", .swgroup = TEGRA_SWGROUP_NVENC, .reg = 0x264 },
+ { .name = "nv", .swgroup = TEGRA_SWGROUP_NV, .reg = 0x268 },
+ { .name = "nv2", .swgroup = TEGRA_SWGROUP_NV2, .reg = 0x26c },
{ .name = "ppcs", .swgroup = TEGRA_SWGROUP_PPCS, .reg = 0x270 },
{ .name = "sata", .swgroup = TEGRA_SWGROUP_SATA, .reg = 0x274 },
- { .name = "isp2", .swgroup = TEGRA_SWGROUP_ISP2, .reg = 0x258 },
+ { .name = "vi", .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 },
+ { .name = "vic", .swgroup = TEGRA_SWGROUP_VIC, .reg = 0x284 },
{ .name = "xusb_host", .swgroup = TEGRA_SWGROUP_XUSB_HOST, .reg = 0x288 },
{ .name = "xusb_dev", .swgroup = TEGRA_SWGROUP_XUSB_DEV, .reg = 0x28c },
- { .name = "isp2b", .swgroup = TEGRA_SWGROUP_ISP2B, .reg = 0xaa4 },
- { .name = "tsec", .swgroup = TEGRA_SWGROUP_TSEC, .reg = 0x294 },
{ .name = "a9avp", .swgroup = TEGRA_SWGROUP_A9AVP, .reg = 0x290 },
- { .name = "gpu", .swgroup = TEGRA_SWGROUP_GPU, .reg = 0xaac },
+ { .name = "tsec", .swgroup = TEGRA_SWGROUP_TSEC, .reg = 0x294 },
+ { .name = "ppcs1", .swgroup = TEGRA_SWGROUP_PPCS1, .reg = 0x298 },
+ { .name = "dc1", .swgroup = TEGRA_SWGROUP_DC1, .reg = 0xa88 },
{ .name = "sdmmc1a", .swgroup = TEGRA_SWGROUP_SDMMC1A, .reg = 0xa94 },
{ .name = "sdmmc2a", .swgroup = TEGRA_SWGROUP_SDMMC2A, .reg = 0xa98 },
{ .name = "sdmmc3a", .swgroup = TEGRA_SWGROUP_SDMMC3A, .reg = 0xa9c },
{ .name = "sdmmc4a", .swgroup = TEGRA_SWGROUP_SDMMC4A, .reg = 0xaa0 },
- { .name = "vic", .swgroup = TEGRA_SWGROUP_VIC, .reg = 0x284 },
- { .name = "vi", .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 },
+ { .name = "isp2b", .swgroup = TEGRA_SWGROUP_ISP2B, .reg = 0xaa4 },
+ { .name = "gpu", .swgroup = TEGRA_SWGROUP_GPU, .reg = 0xaac },
+ { .name = "ppcs2", .swgroup = TEGRA_SWGROUP_PPCS2, .reg = 0xab0 },
{ .name = "nvdec", .swgroup = TEGRA_SWGROUP_NVDEC, .reg = 0xab4 },
{ .name = "ape", .swgroup = TEGRA_SWGROUP_APE, .reg = 0xab8 },
- { .name = "nvjpg", .swgroup = TEGRA_SWGROUP_NVJPG, .reg = 0xac0 },
{ .name = "se", .swgroup = TEGRA_SWGROUP_SE, .reg = 0xabc },
+ { .name = "nvjpg", .swgroup = TEGRA_SWGROUP_NVJPG, .reg = 0xac0 },
+ { .name = "hc1", .swgroup = TEGRA_SWGROUP_HC1, .reg = 0xac4 },
+ { .name = "se1", .swgroup = TEGRA_SWGROUP_SE1, .reg = 0xac8 },
{ .name = "axiap", .swgroup = TEGRA_SWGROUP_AXIAP, .reg = 0xacc },
{ .name = "etr", .swgroup = TEGRA_SWGROUP_ETR, .reg = 0xad0 },
{ .name = "tsecb", .swgroup = TEGRA_SWGROUP_TSECB, .reg = 0xad4 },
+ { .name = "tsec1", .swgroup = TEGRA_SWGROUP_TSEC1, .reg = 0xad8 },
+ { .name = "tsecb1", .swgroup = TEGRA_SWGROUP_TSECB1, .reg = 0xadc },
+ { .name = "nvdec1", .swgroup = TEGRA_SWGROUP_NVDEC1, .reg = 0xae0 },
};
static const unsigned int tegra210_group_display[] = {
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
index 055af0e08a2e..44ac155936aa 100644
--- a/drivers/memory/tegra/tegra30-emc.c
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -14,16 +14,21 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/interconnect-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/types.h>
+#include <soc/tegra/common.h>
#include <soc/tegra/fuse.h>
#include "mc.h"
@@ -323,9 +328,21 @@ struct emc_timing {
bool emc_cfg_dyn_self_ref;
};
+enum emc_rate_request_type {
+ EMC_RATE_DEBUG,
+ EMC_RATE_ICC,
+ EMC_RATE_TYPE_MAX,
+};
+
+struct emc_rate_request {
+ unsigned long min_rate;
+ unsigned long max_rate;
+};
+
struct tegra_emc {
struct device *dev;
struct tegra_mc *mc;
+ struct icc_provider provider;
struct notifier_block clk_nb;
struct clk *clk;
void __iomem *regs;
@@ -352,6 +369,15 @@ struct tegra_emc {
unsigned long min_rate;
unsigned long max_rate;
} debugfs;
+
+ /*
+ * There are multiple sources in the EMC driver which could request
+ * a min/max clock rate; these rates are contained in this array.
+ */
+ struct emc_rate_request requested_rate[EMC_RATE_TYPE_MAX];
+
+ /* protect shared rate-change code path */
+ struct mutex rate_lock;
};
static int emc_seq_update_timing(struct tegra_emc *emc)
@@ -988,6 +1014,11 @@ static struct device_node *emc_find_node_by_ram_code(struct device *dev)
u32 value, ram_code;
int err;
+ if (of_get_child_count(dev->of_node) == 0) {
+ dev_info(dev, "device-tree doesn't have memory timings\n");
+ return NULL;
+ }
+
ram_code = tegra_read_ram_code();
for_each_child_of_node(dev->of_node, np) {
@@ -1057,6 +1088,9 @@ static long emc_round_rate(unsigned long rate,
struct tegra_emc *emc = arg;
unsigned int i;
+ if (!emc->num_timings)
+ return clk_get_rate(emc->clk);
+
min_rate = min(min_rate, emc->timings[emc->num_timings - 1].rate);
for (i = 0; i < emc->num_timings; i++) {
@@ -1086,6 +1120,83 @@ static long emc_round_rate(unsigned long rate,
return timing->rate;
}
+static void tegra_emc_rate_requests_init(struct tegra_emc *emc)
+{
+ unsigned int i;
+
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++) {
+ emc->requested_rate[i].min_rate = 0;
+ emc->requested_rate[i].max_rate = ULONG_MAX;
+ }
+}
+
+static int emc_request_rate(struct tegra_emc *emc,
+ unsigned long new_min_rate,
+ unsigned long new_max_rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = emc->requested_rate;
+ unsigned long min_rate = 0, max_rate = ULONG_MAX;
+ unsigned int i;
+ int err;
+
+ /* select minimum and maximum rates among the requested rates */
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++, req++) {
+ if (i == type) {
+ min_rate = max(new_min_rate, min_rate);
+ max_rate = min(new_max_rate, max_rate);
+ } else {
+ min_rate = max(req->min_rate, min_rate);
+ max_rate = min(req->max_rate, max_rate);
+ }
+ }
+
+ if (min_rate > max_rate) {
+ dev_err_ratelimited(emc->dev, "%s: type %u: out of range: %lu %lu\n",
+ __func__, type, min_rate, max_rate);
+ return -ERANGE;
+ }
+
+ /*
+ * EMC rate-changes should go via the OPP API because it manages voltage
+ * changes.
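+ *
+ * For example, if the debugfs interface requested a 100 MHz minimum
+ * and the ICC path a 300 MHz minimum, min_rate is 300 MHz here and
+ * that is the rate handed to the OPP layer.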
+ */
+ err = dev_pm_opp_set_rate(emc->dev, min_rate);
+ if (err)
+ return err;
+
+ emc->requested_rate[type].min_rate = new_min_rate;
+ emc->requested_rate[type].max_rate = new_max_rate;
+
+ return 0;
+}
+
+static int emc_set_min_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, rate, req->max_rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
+static int emc_set_max_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, req->min_rate, rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
/*
* debugfs interface
*
@@ -1169,7 +1280,7 @@ static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
if (!tegra_emc_validate_rate(emc, rate))
return -EINVAL;
- err = clk_set_min_rate(emc->clk, rate);
+ err = emc_set_min_rate(emc, rate, EMC_RATE_DEBUG);
if (err < 0)
return err;
@@ -1199,7 +1310,7 @@ static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
if (!tegra_emc_validate_rate(emc, rate))
return -EINVAL;
- err = clk_set_max_rate(emc->clk, rate);
+ err = emc_set_max_rate(emc, rate, EMC_RATE_DEBUG);
if (err < 0)
return err;
@@ -1256,51 +1367,239 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc)
emc, &tegra_emc_debug_max_rate_fops);
}
-static int tegra_emc_probe(struct platform_device *pdev)
+static inline struct tegra_emc *
+to_tegra_emc_provider(struct icc_provider *provider)
{
- struct platform_device *mc;
- struct device_node *np;
- struct tegra_emc *emc;
+ return container_of(provider, struct tegra_emc, provider);
+}
+
+static struct icc_node_data *
+emc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct icc_provider *provider = data;
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ /* External Memory is the only possible ICC route */
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ if (node->id != TEGRA_ICC_EMEM)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * SRC and DST nodes should have matching TAG in order to have
+ * it set by default for a requested path.
+ */
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ ndata->node = node;
+
+ return ndata;
+ }
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+static int emc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct tegra_emc *emc = to_tegra_emc_provider(dst->provider);
+ unsigned long long peak_bw = icc_units_to_bps(dst->peak_bw);
+ unsigned long long avg_bw = icc_units_to_bps(dst->avg_bw);
+ unsigned long long rate = max(avg_bw, peak_bw);
+ const unsigned int dram_data_bus_width_bytes = 4;
+ const unsigned int ddr = 2;
+ int err;
+
+ /*
+ * The Tegra30 EMC runs at the SDRAM bus clock rate. This means that
+ * the EMC clock rate is half the peak data rate because data is
+ * sampled on both EMC clock edges.
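+ *
+ * For example (illustrative numbers only): a 3200 MB/s peak request
+ * on the 4-byte-wide DDR bus translates to 3200 / (2 * 4) = 400 MHz
+ * of EMC clock.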
+ */
+ do_div(rate, ddr * dram_data_bus_width_bytes);
+ rate = min_t(u64, rate, U32_MAX);
+
+ err = emc_set_min_rate(emc, rate, EMC_RATE_ICC);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+{
+ const struct tegra_mc_soc *soc = emc->mc->soc;
+ struct icc_node *node;
int err;
- if (of_get_child_count(pdev->dev.of_node) == 0) {
- dev_info(&pdev->dev,
- "device-tree node doesn't have memory timings\n");
- return -ENODEV;
+ emc->provider.dev = emc->dev;
+ emc->provider.set = emc_icc_set;
+ emc->provider.data = &emc->provider;
+ emc->provider.aggregate = soc->icc_ops->aggregate;
+ emc->provider.xlate_extended = emc_of_icc_xlate_extended;
+
+ err = icc_provider_add(&emc->provider);
+ if (err)
+ goto err_msg;
+
+ /* create External Memory Controller node */
+ node = icc_node_create(TEGRA_ICC_EMC);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto del_provider;
}
- np = of_parse_phandle(pdev->dev.of_node, "nvidia,memory-controller", 0);
- if (!np) {
- dev_err(&pdev->dev, "could not get memory controller node\n");
- return -ENOENT;
+ node->name = "External Memory Controller";
+ icc_node_add(node, &emc->provider);
+
+ /* link External Memory Controller to External Memory (DRAM) */
+ err = icc_link_create(node, TEGRA_ICC_EMEM);
+ if (err)
+ goto remove_nodes;
+
+ /* create External Memory node */
+ node = icc_node_create(TEGRA_ICC_EMEM);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto remove_nodes;
}
- mc = of_find_device_by_node(np);
- of_node_put(np);
- if (!mc)
- return -ENOENT;
+ node->name = "External Memory (DRAM)";
+ icc_node_add(node, &emc->provider);
- np = emc_find_node_by_ram_code(&pdev->dev);
- if (!np)
- return -EINVAL;
+ return 0;
+
+remove_nodes:
+ icc_nodes_remove(&emc->provider);
+del_provider:
+ icc_provider_del(&emc->provider);
+err_msg:
+ dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
+
+ return err;
+}
+
+static int tegra_emc_opp_table_init(struct tegra_emc *emc)
+{
+ u32 hw_version = BIT(tegra_sku_info.soc_speedo_id);
+ struct opp_table *clk_opp_table, *hw_opp_table;
+ int err;
+
+ clk_opp_table = dev_pm_opp_set_clkname(emc->dev, NULL);
+ err = PTR_ERR_OR_ZERO(clk_opp_table);
+ if (err) {
+ dev_err(emc->dev, "failed to set OPP clk: %d\n", err);
+ return err;
+ }
+
+ hw_opp_table = dev_pm_opp_set_supported_hw(emc->dev, &hw_version, 1);
+ err = PTR_ERR_OR_ZERO(hw_opp_table);
+ if (err) {
+ dev_err(emc->dev, "failed to set OPP supported HW: %d\n", err);
+ goto put_clk_table;
+ }
+
+ err = dev_pm_opp_of_add_table(emc->dev);
+ if (err) {
+ if (err == -ENODEV)
+ dev_err(emc->dev, "OPP table not found, please update your device tree\n");
+ else
+ dev_err(emc->dev, "failed to add OPP table: %d\n", err);
+
+ goto put_hw_table;
+ }
+
+ dev_info(emc->dev, "OPP HW ver. 0x%x, current clock rate %lu MHz\n",
+ hw_version, clk_get_rate(emc->clk) / 1000000);
+
+ /* first dummy rate-set initializes voltage state */
+ err = dev_pm_opp_set_rate(emc->dev, clk_get_rate(emc->clk));
+ if (err) {
+ dev_err(emc->dev, "failed to initialize OPP clock: %d\n", err);
+ goto remove_table;
+ }
+
+ return 0;
+
+remove_table:
+ dev_pm_opp_of_remove_table(emc->dev);
+put_hw_table:
+ dev_pm_opp_put_supported_hw(hw_opp_table);
+put_clk_table:
+ dev_pm_opp_put_clkname(clk_opp_table);
+
+ return err;
+}
+
+static void devm_tegra_emc_unset_callback(void *data)
+{
+ tegra20_clk_set_emc_round_callback(NULL, NULL);
+}
+
+static void devm_tegra_emc_unreg_clk_notifier(void *data)
+{
+ struct tegra_emc *emc = data;
+
+ clk_notifier_unregister(emc->clk, &emc->clk_nb);
+}
+
+static int tegra_emc_init_clk(struct tegra_emc *emc)
+{
+ int err;
+
+ tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
+
+ err = devm_add_action_or_reset(emc->dev, devm_tegra_emc_unset_callback,
+ NULL);
+ if (err)
+ return err;
+
+ emc->clk = devm_clk_get(emc->dev, NULL);
+ if (IS_ERR(emc->clk)) {
+ dev_err(emc->dev, "failed to get EMC clock: %pe\n", emc->clk);
+ return PTR_ERR(emc->clk);
+ }
+
+ err = clk_notifier_register(emc->clk, &emc->clk_nb);
+ if (err) {
+ dev_err(emc->dev, "failed to register clk notifier: %d\n", err);
+ return err;
+ }
+
+ err = devm_add_action_or_reset(emc->dev,
+ devm_tegra_emc_unreg_clk_notifier, emc);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra_emc_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct tegra_emc *emc;
+ int err;
emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
- if (!emc) {
- of_node_put(np);
+ if (!emc)
return -ENOMEM;
- }
- emc->mc = platform_get_drvdata(mc);
- if (!emc->mc)
- return -EPROBE_DEFER;
+ emc->mc = devm_tegra_memory_controller_get(&pdev->dev);
+ if (IS_ERR(emc->mc))
+ return PTR_ERR(emc->mc);
+ mutex_init(&emc->rate_lock);
emc->clk_nb.notifier_call = emc_clk_change_notify;
emc->dev = &pdev->dev;
- err = emc_load_timings_from_dt(emc, np);
- of_node_put(np);
- if (err)
- return err;
+ np = emc_find_node_by_ram_code(&pdev->dev);
+ if (np) {
+ err = emc_load_timings_from_dt(emc, np);
+ of_node_put(np);
+ if (err)
+ return err;
+ }
emc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(emc->regs))
@@ -1311,10 +1610,9 @@ static int tegra_emc_probe(struct platform_device *pdev)
return err;
err = platform_get_irq(pdev, 0);
- if (err < 0) {
- dev_err(&pdev->dev, "interrupt not specified: %d\n", err);
+ if (err < 0)
return err;
- }
+
emc->irq = err;
err = devm_request_irq(&pdev->dev, emc->irq, tegra_emc_isr, 0,
@@ -1324,31 +1622,27 @@ static int tegra_emc_probe(struct platform_device *pdev)
return err;
}
- tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
-
- emc->clk = devm_clk_get(&pdev->dev, "emc");
- if (IS_ERR(emc->clk)) {
- err = PTR_ERR(emc->clk);
- dev_err(&pdev->dev, "failed to get emc clock: %d\n", err);
- goto unset_cb;
- }
+ err = tegra_emc_init_clk(emc);
+ if (err)
+ return err;
- err = clk_notifier_register(emc->clk, &emc->clk_nb);
- if (err) {
- dev_err(&pdev->dev, "failed to register clk notifier: %d\n",
- err);
- goto unset_cb;
- }
+ err = tegra_emc_opp_table_init(emc);
+ if (err)
+ return err;
platform_set_drvdata(pdev, emc);
+ tegra_emc_rate_requests_init(emc);
tegra_emc_debugfs_init(emc);
+ tegra_emc_interconnect_init(emc);
- return 0;
-
-unset_cb:
- tegra20_clk_set_emc_round_callback(NULL, NULL);
+ /*
+ * Don't allow the kernel module to be unloaded. Unloading adds some
+ * extra complexity which isn't really worth the effort in the case
+ * of this driver.
+ */
+ try_module_get(THIS_MODULE);
- return err;
+ return 0;
}
static int tegra_emc_suspend(struct device *dev)
@@ -1393,6 +1687,7 @@ static const struct of_device_id tegra_emc_of_match[] = {
{ .compatible = "nvidia,tegra30-emc", },
{},
};
+MODULE_DEVICE_TABLE(of, tegra_emc_of_match);
static struct platform_driver tegra_emc_driver = {
.probe = tegra_emc_probe,
@@ -1401,11 +1696,11 @@ static struct platform_driver tegra_emc_driver = {
.of_match_table = tegra_emc_of_match,
.pm = &tegra_emc_pm_ops,
.suppress_bind_attrs = true,
+ .sync_state = icc_sync_state,
},
};
+module_platform_driver(tegra_emc_driver);
-static int __init tegra_emc_init(void)
-{
- return platform_driver_register(&tegra_emc_driver);
-}
-subsys_initcall(tegra_emc_init);
+MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra30 EMC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
index fcdd812eed80..ea849003014b 100644
--- a/drivers/memory/tegra/tegra30.c
+++ b/drivers/memory/tegra/tegra30.c
@@ -4,7 +4,8 @@
*/
#include <linux/of.h>
-#include <linux/mm.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
#include <dt-bindings/memory/tegra30-mc.h>
@@ -36,6 +37,13 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.id = 0x00,
.name = "ptcr",
.swgroup = TEGRA_SWGROUP_PTC,
+ .la = {
+ .reg = 0x34c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0,
+ },
+ .fifo_size = 16 * 2,
}, {
.id = 0x01,
.name = "display0a",
@@ -50,6 +58,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x4e,
},
+ .fifo_size = 16 * 128,
}, {
.id = 0x02,
.name = "display0ab",
@@ -64,6 +73,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x4e,
},
+ .fifo_size = 16 * 128,
}, {
.id = 0x03,
.name = "display0b",
@@ -78,6 +88,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x4e,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x04,
.name = "display0bb",
@@ -92,6 +103,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x4e,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x05,
.name = "display0c",
@@ -106,6 +118,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x4e,
},
+ .fifo_size = 16 * 128,
}, {
.id = 0x06,
.name = "display0cb",
@@ -120,6 +133,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x4e,
},
+ .fifo_size = 16 * 128,
}, {
.id = 0x07,
.name = "display1b",
@@ -134,6 +148,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x4e,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x08,
.name = "display1bb",
@@ -148,6 +163,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x4e,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x09,
.name = "eppup",
@@ -162,6 +178,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x17,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x0a,
.name = "g2pr",
@@ -176,6 +193,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x09,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x0b,
.name = "g2sr",
@@ -190,6 +208,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x09,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x0c,
.name = "mpeunifbr",
@@ -204,6 +223,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x50,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x0d,
.name = "viruv",
@@ -218,6 +238,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x2c,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x0e,
.name = "afir",
@@ -232,6 +253,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x10,
},
+ .fifo_size = 16 * 32,
}, {
.id = 0x0f,
.name = "avpcarm7r",
@@ -246,6 +268,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x04,
},
+ .fifo_size = 16 * 2,
}, {
.id = 0x10,
.name = "displayhc",
@@ -260,6 +283,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 2,
}, {
.id = 0x11,
.name = "displayhcb",
@@ -274,6 +298,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 2,
}, {
.id = 0x12,
.name = "fdcdrd",
@@ -288,6 +313,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0a,
},
+ .fifo_size = 16 * 48,
}, {
.id = 0x13,
.name = "fdcdrd2",
@@ -302,6 +328,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0a,
},
+ .fifo_size = 16 * 48,
}, {
.id = 0x14,
.name = "g2dr",
@@ -316,6 +343,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0a,
},
+ .fifo_size = 16 * 48,
}, {
.id = 0x15,
.name = "hdar",
@@ -330,6 +358,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 16,
}, {
.id = 0x16,
.name = "host1xdmar",
@@ -344,6 +373,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x05,
},
+ .fifo_size = 16 * 16,
}, {
.id = 0x17,
.name = "host1xr",
@@ -358,6 +388,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x50,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x18,
.name = "idxsrd",
@@ -372,6 +403,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x13,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x19,
.name = "idxsrd2",
@@ -386,6 +418,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x13,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x1a,
.name = "mpe_ipred",
@@ -400,6 +433,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x80,
},
+ .fifo_size = 16 * 2,
}, {
.id = 0x1b,
.name = "mpeamemrd",
@@ -414,6 +448,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x42,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x1c,
.name = "mpecsrd",
@@ -428,6 +463,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x1d,
.name = "ppcsahbdmar",
@@ -442,6 +478,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x10,
},
+ .fifo_size = 16 * 2,
}, {
.id = 0x1e,
.name = "ppcsahbslvr",
@@ -456,6 +493,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x12,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x1f,
.name = "satar",
@@ -470,6 +508,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x33,
},
+ .fifo_size = 16 * 32,
}, {
.id = 0x20,
.name = "texsrd",
@@ -484,6 +523,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x13,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x21,
.name = "texsrd2",
@@ -498,6 +538,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x13,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x22,
.name = "vdebsevr",
@@ -512,6 +553,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x23,
.name = "vdember",
@@ -526,6 +568,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xd0,
},
+ .fifo_size = 16 * 4,
}, {
.id = 0x24,
.name = "vdemcer",
@@ -540,6 +583,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x2a,
},
+ .fifo_size = 16 * 16,
}, {
.id = 0x25,
.name = "vdetper",
@@ -554,6 +598,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x74,
},
+ .fifo_size = 16 * 16,
}, {
.id = 0x26,
.name = "mpcorelpr",
@@ -564,6 +609,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x04,
},
+ .fifo_size = 16 * 14,
}, {
.id = 0x27,
.name = "mpcorer",
@@ -574,6 +620,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x04,
},
+ .fifo_size = 16 * 14,
}, {
.id = 0x28,
.name = "eppu",
@@ -588,6 +635,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x6c,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x29,
.name = "eppv",
@@ -602,6 +650,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x6c,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x2a,
.name = "eppy",
@@ -616,6 +665,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x6c,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x2b,
.name = "mpeunifbw",
@@ -630,6 +680,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x13,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x2c,
.name = "viwsb",
@@ -644,6 +695,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x12,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x2d,
.name = "viwu",
@@ -658,6 +710,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xb2,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x2e,
.name = "viwv",
@@ -672,6 +725,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xb2,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x2f,
.name = "viwy",
@@ -686,6 +740,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x12,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x30,
.name = "g2dw",
@@ -700,6 +755,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x9,
},
+ .fifo_size = 16 * 128,
}, {
.id = 0x31,
.name = "afiw",
@@ -714,6 +770,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0c,
},
+ .fifo_size = 16 * 32,
}, {
.id = 0x32,
.name = "avpcarm7w",
@@ -728,6 +785,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0e,
},
+ .fifo_size = 16 * 2,
}, {
.id = 0x33,
.name = "fdcdwr",
@@ -742,6 +800,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0a,
},
+ .fifo_size = 16 * 48,
}, {
.id = 0x34,
.name = "fdcdwr2",
@@ -756,6 +815,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0a,
},
+ .fifo_size = 16 * 48,
}, {
.id = 0x35,
.name = "hdaw",
@@ -770,6 +830,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 16,
}, {
.id = 0x36,
.name = "host1xw",
@@ -784,6 +845,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x10,
},
+ .fifo_size = 16 * 32,
}, {
.id = 0x37,
.name = "ispw",
@@ -798,6 +860,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x38,
.name = "mpcorelpw",
@@ -808,6 +871,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0e,
},
+ .fifo_size = 16 * 24,
}, {
.id = 0x39,
.name = "mpcorew",
@@ -818,6 +882,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0e,
},
+ .fifo_size = 16 * 24,
}, {
.id = 0x3a,
.name = "mpecswr",
@@ -832,6 +897,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x3b,
.name = "ppcsahbdmaw",
@@ -846,6 +912,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x10,
},
+ .fifo_size = 16 * 2,
}, {
.id = 0x3c,
.name = "ppcsahbslvw",
@@ -860,6 +927,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x06,
},
+ .fifo_size = 16 * 4,
}, {
.id = 0x3d,
.name = "sataw",
@@ -874,6 +942,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x33,
},
+ .fifo_size = 16 * 32,
}, {
.id = 0x3e,
.name = "vdebsevw",
@@ -888,6 +957,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 4,
}, {
.id = 0x3f,
.name = "vdedbgw",
@@ -902,6 +972,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 16,
}, {
.id = 0x40,
.name = "vdembew",
@@ -916,6 +987,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x42,
},
+ .fifo_size = 16 * 2,
}, {
.id = 0x41,
.name = "vdetpmw",
@@ -930,6 +1002,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x2a,
},
+ .fifo_size = 16 * 16,
},
};
@@ -1011,6 +1084,175 @@ static const struct tegra_mc_reset tegra30_mc_resets[] = {
TEGRA30_MC_RESET(VI, 0x200, 0x204, 17),
};
+static void tegra30_mc_tune_client_latency(struct tegra_mc *mc,
+ const struct tegra_mc_client *client,
+ unsigned int bandwidth_mbytes_sec)
+{
+ u32 arb_tolerance_compensation_nsec, arb_tolerance_compensation_div;
+ const struct tegra_mc_la *la = &client->la;
+ unsigned int fifo_size = client->fifo_size;
+ u32 arb_nsec, la_ticks, value;
+
+ /* see 18.4.1 Client Configuration in Tegra3 TRM v03p */
+ if (bandwidth_mbytes_sec)
+ arb_nsec = fifo_size * NSEC_PER_USEC / bandwidth_mbytes_sec;
+ else
+ arb_nsec = U32_MAX;
+
+ /*
+ * Latency allowance should be set with consideration for the module's
+ * latency tolerance and internal buffering capabilities.
+ *
+ * Display memory clients use isochronous transfers and have very low
+ * tolerance to belated transfers. Hence we need to compensate for
+ * memory arbitration imperfections for them in order to prevent a
+ * FIFO underflow condition when the memory bus is busy.
+ *
+ * VI clients also need stronger compensation.
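+ *
+ * As an illustrative example: a display client with a 16 * 128 byte
+ * FIFO that requests 1000 MB/s gets arb_nsec = 2048 * 1000 / 1000 =
+ * 2048 ns, which the DC compensation below reduces to (2048 - 1050) /
+ * 2 = 499 ns before it is converted into arbiter ticks.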
+ */
+ switch (client->swgroup) {
+ case TEGRA_SWGROUP_MPCORE:
+ case TEGRA_SWGROUP_PTC:
+ /*
+ * We always want lower latency for these clients, hence
+ * don't touch them.
+ */
+ return;
+
+ case TEGRA_SWGROUP_DC:
+ case TEGRA_SWGROUP_DCB:
+ arb_tolerance_compensation_nsec = 1050;
+ arb_tolerance_compensation_div = 2;
+ break;
+
+ case TEGRA_SWGROUP_VI:
+ arb_tolerance_compensation_nsec = 1050;
+ arb_tolerance_compensation_div = 1;
+ break;
+
+ default:
+ arb_tolerance_compensation_nsec = 150;
+ arb_tolerance_compensation_div = 1;
+ break;
+ }
+
+ if (arb_nsec > arb_tolerance_compensation_nsec)
+ arb_nsec -= arb_tolerance_compensation_nsec;
+ else
+ arb_nsec = 0;
+
+ arb_nsec /= arb_tolerance_compensation_div;
+
+ /*
+ * Latency allowance is a number of ticks a request from a particular
+ * client may wait in the EMEM arbiter before it becomes a high-priority
+ * request.
+ */
+ la_ticks = arb_nsec / mc->tick;
+ la_ticks = min(la_ticks, la->mask);
+
+ value = mc_readl(mc, la->reg);
+ value &= ~(la->mask << la->shift);
+ value |= la_ticks << la->shift;
+ mc_writel(mc, value, la->reg);
+}
+
+static int tegra30_mc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct tegra_mc *mc = icc_provider_to_tegra_mc(src->provider);
+ const struct tegra_mc_client *client = &mc->soc->clients[src->id];
+ u64 peak_bandwidth = icc_units_to_bps(src->peak_bw);
+
+ /*
+ * Skip pre-initialization that is done by icc_node_add(), which sets
+ * bandwidth to maximum for all clients before drivers are loaded.
+ *
+ * This doesn't make sense for us because we don't have drivers for all
+ * clients and it's okay to keep the configuration left by the
+ * bootloader during boot, at least for today.
+ */
+ if (src == dst)
+ return 0;
+
+ /* convert bytes/sec to megabytes/sec */
+ do_div(peak_bandwidth, 1000000);
+
+ tegra30_mc_tune_client_latency(mc, client, peak_bandwidth);
+
+ return 0;
+}
+
+static int tegra30_mc_icc_aggreate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ /*
+ * ISO clients need to reserve extra bandwidth up-front because
+ * there could be high bandwidth pressure during initial filling
+ * of the client's FIFO buffers. Secondly, we need to take into
+ * account inefficiencies of the memory subsystem.
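+ *
+ * For example, an ISO client's 100 MB/s peak request is accounted
+ * here as a 400 MB/s peak (scaled to 400%).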
+ */
+ if (tag & TEGRA_MC_ICC_TAG_ISO)
+ peak_bw = tegra_mc_scale_percents(peak_bw, 400);
+
+ *agg_avg += avg_bw;
+ *agg_peak = max(*agg_peak, peak_bw);
+
+ return 0;
+}
+
+static struct icc_node_data *
+tegra30_mc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
+ const struct tegra_mc_client *client;
+ unsigned int i, idx = spec->args[0];
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ list_for_each_entry(node, &mc->provider.nodes, node_list) {
+ if (node->id != idx)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ client = &mc->soc->clients[idx];
+ ndata->node = node;
+
+ switch (client->swgroup) {
+ case TEGRA_SWGROUP_DC:
+ case TEGRA_SWGROUP_DCB:
+ case TEGRA_SWGROUP_PTC:
+ case TEGRA_SWGROUP_VI:
+ /* these clients are isochronous by default */
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ break;
+
+ default:
+ ndata->tag = TEGRA_MC_ICC_TAG_DEFAULT;
+ break;
+ }
+
+ return ndata;
+ }
+
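+ /* the client ID is valid but its ICC node isn't registered yet, so defer */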
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ if (mc->soc->clients[i].id == idx)
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ dev_err(mc->dev, "invalid ICC client ID %u\n", idx);
+
+ return ERR_PTR(-EINVAL);
+}
+
+static const struct tegra_mc_icc_ops tegra30_mc_icc_ops = {
+ .xlate_extended = tegra30_mc_of_icc_xlate_extended,
+ .aggregate = tegra30_mc_icc_aggreate,
+ .set = tegra30_mc_icc_set,
+};
+
const struct tegra_mc_soc tegra30_mc_soc = {
.clients = tegra30_mc_clients,
.num_clients = ARRAY_SIZE(tegra30_mc_clients),
@@ -1025,4 +1267,5 @@ const struct tegra_mc_soc tegra30_mc_soc = {
.reset_ops = &tegra_mc_reset_ops_common,
.resets = tegra30_mc_resets,
.num_resets = ARRAY_SIZE(tegra30_mc_resets),
+ .icc_ops = &tegra30_mc_icc_ops,
};
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 82d10b6661c7..884023e88345 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -258,6 +258,7 @@ config OMAP_CF
config AT91_CF
tristate "AT91 CompactFlash Controller"
depends on PCI
+ depends on OF
depends on PCMCIA && ARCH_AT91
help
Say Y here to support the CompactFlash controller on AT91 chips.
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index 7db0e9c74dfc..6b1edfc890a3 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -13,7 +13,6 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/gpio.h>
-#include <linux/platform_data/atmel.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/mfd/syscon.h>
@@ -35,6 +34,17 @@
#define CF_IO_PHYS (1 << 23)
#define CF_MEM_PHYS (0x017ff800)
+struct at91_cf_data {
+ int irq_pin; /* I/O IRQ */
+ int det_pin; /* Card detect */
+ int vcc_pin; /* power switching */
+ int rst_pin; /* card reset */
+ u8 chipselect; /* EBI Chip Select number */
+ u8 flags;
+#define AT91_CF_TRUE_IDE 0x01
+#define AT91_IDE_SWAP_A0_A2 0x02
+};
+
struct regmap *mc;
/*--------------------------------------------------------------------------*/
@@ -209,16 +219,18 @@ static struct pccard_operations at91_cf_ops = {
/*--------------------------------------------------------------------------*/
-#if defined(CONFIG_OF)
static const struct of_device_id at91_cf_dt_ids[] = {
{ .compatible = "atmel,at91rm9200-cf" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, at91_cf_dt_ids);
-static int at91_cf_dt_init(struct platform_device *pdev)
+static int at91_cf_probe(struct platform_device *pdev)
{
- struct at91_cf_data *board;
+ struct at91_cf_socket *cf;
+ struct at91_cf_data *board;
+ struct resource *io;
+ int status;
board = devm_kzalloc(&pdev->dev, sizeof(*board), GFP_KERNEL);
if (!board)
@@ -229,33 +241,9 @@ static int at91_cf_dt_init(struct platform_device *pdev)
board->vcc_pin = of_get_gpio(pdev->dev.of_node, 2);
board->rst_pin = of_get_gpio(pdev->dev.of_node, 3);
- pdev->dev.platform_data = board;
-
mc = syscon_regmap_lookup_by_compatible("atmel,at91rm9200-sdramc");
-
- return PTR_ERR_OR_ZERO(mc);
-}
-#else
-static int at91_cf_dt_init(struct platform_device *pdev)
-{
- return -ENODEV;
-}
-#endif
-
-static int at91_cf_probe(struct platform_device *pdev)
-{
- struct at91_cf_socket *cf;
- struct at91_cf_data *board = pdev->dev.platform_data;
- struct resource *io;
- int status;
-
- if (!board) {
- status = at91_cf_dt_init(pdev);
- if (status)
- return status;
-
- board = pdev->dev.platform_data;
- }
+ if (IS_ERR(mc))
+ return PTR_ERR(mc);
if (!gpio_is_valid(board->det_pin) || !gpio_is_valid(board->rst_pin))
return -ENODEV;
@@ -399,7 +387,7 @@ static int at91_cf_resume(struct platform_device *pdev)
static struct platform_driver at91_cf_driver = {
.driver = {
.name = "at91_cf",
- .of_match_table = of_match_ptr(at91_cf_dt_ids),
+ .of_match_table = at91_cf_dt_ids,
},
.probe = at91_cf_probe,
.remove = at91_cf_remove,
diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c
index b9349d684258..92d387dfc03b 100644
--- a/drivers/remoteproc/wkup_m3_rproc.c
+++ b/drivers/remoteproc/wkup_m3_rproc.c
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/remoteproc.h>
+#include <linux/reset.h>
#include <linux/platform_data/wkup_m3.h>
@@ -43,11 +44,13 @@ struct wkup_m3_mem {
* @rproc: rproc handle
* @pdev: pointer to platform device
* @mem: WkupM3 memory information
+ * @rsts: reset control
*/
struct wkup_m3_rproc {
struct rproc *rproc;
struct platform_device *pdev;
struct wkup_m3_mem mem[WKUPM3_MEM_MAX];
+ struct reset_control *rsts;
};
static int wkup_m3_rproc_start(struct rproc *rproc)
@@ -56,13 +59,16 @@ static int wkup_m3_rproc_start(struct rproc *rproc)
struct platform_device *pdev = wkupm3->pdev;
struct device *dev = &pdev->dev;
struct wkup_m3_platform_data *pdata = dev_get_platdata(dev);
+ int error = 0;
- if (pdata->deassert_reset(pdev, pdata->reset_name)) {
+ error = reset_control_deassert(wkupm3->rsts);
+
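+ /*
+ * reset_control_deassert() is a no-op that returns 0 for an absent
+ * optional reset, so the legacy platform-data callback below is only
+ * used when no "rstctrl" reset is described in the device tree.
+ */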
+ if (!wkupm3->rsts && pdata->deassert_reset(pdev, pdata->reset_name)) {
dev_err(dev, "Unable to reset wkup_m3!\n");
- return -ENODEV;
+ error = -ENODEV;
}
- return 0;
+ return error;
}
static int wkup_m3_rproc_stop(struct rproc *rproc)
@@ -71,13 +77,16 @@ static int wkup_m3_rproc_stop(struct rproc *rproc)
struct platform_device *pdev = wkupm3->pdev;
struct device *dev = &pdev->dev;
struct wkup_m3_platform_data *pdata = dev_get_platdata(dev);
+ int error = 0;
- if (pdata->assert_reset(pdev, pdata->reset_name)) {
+ error = reset_control_assert(wkupm3->rsts);
+
+ if (!wkupm3->rsts && pdata->assert_reset(pdev, pdata->reset_name)) {
dev_err(dev, "Unable to assert reset of wkup_m3!\n");
- return -ENODEV;
+ error = -ENODEV;
}
- return 0;
+ return error;
}
static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
@@ -132,12 +141,6 @@ static int wkup_m3_rproc_probe(struct platform_device *pdev)
int ret;
int i;
- if (!(pdata && pdata->deassert_reset && pdata->assert_reset &&
- pdata->reset_name)) {
- dev_err(dev, "Platform data missing!\n");
- return -ENODEV;
- }
-
ret = of_property_read_string(dev->of_node, "ti,pm-firmware",
&fw_name);
if (ret) {
@@ -165,6 +168,18 @@ static int wkup_m3_rproc_probe(struct platform_device *pdev)
wkupm3->rproc = rproc;
wkupm3->pdev = pdev;
+ wkupm3->rsts = devm_reset_control_get_optional_shared(dev, "rstctrl");
+ if (IS_ERR(wkupm3->rsts))
+ return PTR_ERR(wkupm3->rsts);
+ if (!wkupm3->rsts) {
+ if (!(pdata && pdata->deassert_reset && pdata->assert_reset &&
+ pdata->reset_name)) {
+ dev_err(dev, "Platform data missing!\n");
+ ret = -ENODEV;
+ goto err_put_rproc;
+ }
+ }
+
for (i = 0; i < ARRAY_SIZE(mem_names); i++) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
mem_names[i]);
@@ -173,7 +188,7 @@ static int wkup_m3_rproc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "devm_ioremap_resource failed for resource %d\n",
i);
ret = PTR_ERR(wkupm3->mem[i].cpu_addr);
- goto err;
+ goto err_put_rproc;
}
wkupm3->mem[i].bus_addr = res->start;
wkupm3->mem[i].size = resource_size(res);
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index dceec715e745..71ab75a46491 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -102,7 +102,8 @@ config RESET_LPC18XX
This enables the reset controller driver for NXP LPC18xx/43xx SoCs.
config RESET_MESON
- bool "Meson Reset Driver" if COMPILE_TEST
+ tristate "Meson Reset Driver"
+ depends on ARCH_MESON || COMPILE_TEST
default ARCH_MESON
help
This enables the reset driver for Amlogic Meson SoCs.
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index a2df88e90011..34e89aa0fb5e 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -208,6 +208,39 @@ static int reset_control_array_reset(struct reset_control_array *resets)
return 0;
}
+static int reset_control_array_rearm(struct reset_control_array *resets)
+{
+ struct reset_control *rstc;
+ int i;
+
+ for (i = 0; i < resets->num_rstcs; i++) {
+ rstc = resets->rstc[i];
+
+ if (!rstc)
+ continue;
+
+ if (WARN_ON(IS_ERR(rstc)))
+ return -EINVAL;
+
+ if (rstc->shared) {
+ if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
+ return -EINVAL;
+ } else {
+ if (!rstc->acquired)
+ return -EPERM;
+ }
+ }
+
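+ /* all resets validated above, now re-arm each shared reset in the array */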
+ for (i = 0; i < resets->num_rstcs; i++) {
+ rstc = resets->rstc[i];
+
+ if (rstc && rstc->shared)
+ WARN_ON(atomic_dec_return(&rstc->triggered_count) < 0);
+ }
+
+ return 0;
+}
+
static int reset_control_array_assert(struct reset_control_array *resets)
{
int ret, i;
@@ -326,6 +359,46 @@ int reset_control_reset(struct reset_control *rstc)
EXPORT_SYMBOL_GPL(reset_control_reset);
/**
+ * reset_control_rearm - allow a shared reset line to be re-triggered
+ * @rstc: reset controller
+ *
+ * On a shared reset line the actual reset pulse is only triggered once for the
+ * lifetime of the reset_control instance, unless this call is used.
+ *
+ * Calls to this function must be balanced with calls to reset_control_reset;
+ * a warning is thrown in case triggered_count ever dips below 0.
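+ *
+ * A typical consumer of a shared, pulsed reset line might, for example, do:
+ *
+ *   reset_control_reset(rstc);
+ *   ...
+ *   reset_control_rearm(rstc);
+ *
+ * so that a later reset_control_reset() call triggers a new pulse.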
+ *
+ * Consumers must not use reset_control_(de)assert on shared reset lines when
+ * reset_control_reset or reset_control_rearm have been used.
+ *
+ * If rstc is NULL the function will just return 0.
+ */
+int reset_control_rearm(struct reset_control *rstc)
+{
+ if (!rstc)
+ return 0;
+
+ if (WARN_ON(IS_ERR(rstc)))
+ return -EINVAL;
+
+ if (reset_control_is_array(rstc))
+ return reset_control_array_rearm(rstc_to_array(rstc));
+
+ if (rstc->shared) {
+ if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
+ return -EINVAL;
+
+ WARN_ON(atomic_dec_return(&rstc->triggered_count) < 0);
+ } else {
+ if (!rstc->acquired)
+ return -EPERM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(reset_control_rearm);
+
+/**
* reset_control_assert - asserts the reset line
* @rstc: reset controller
*
diff --git a/drivers/reset/reset-meson.c b/drivers/reset/reset-meson.c
index 94d7ba88d7d2..c9bc325ad65a 100644
--- a/drivers/reset/reset-meson.c
+++ b/drivers/reset/reset-meson.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
@@ -104,6 +105,7 @@ static const struct of_device_id meson_reset_dt_ids[] = {
{ .compatible = "amlogic,meson-a1-reset", .data = &meson_a1_param},
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, meson_reset_dt_ids);
static int meson_reset_probe(struct platform_device *pdev)
{
@@ -142,4 +144,8 @@ static struct platform_driver meson_reset_driver = {
.of_match_table = meson_reset_dt_ids,
},
};
-builtin_platform_driver(meson_reset_driver);
+module_platform_driver(meson_reset_driver);
+
+MODULE_DESCRIPTION("Amlogic Meson Reset Controller driver");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index bdd984296196..2a72f861f798 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -44,7 +44,7 @@ static int a10_reset_init(struct device_node *np)
data->membase = ioremap(res.start, size);
if (!data->membase) {
ret = -ENOMEM;
- goto err_alloc;
+ goto release_region;
}
if (of_property_read_u32(np, "altr,modrst-offset", &reg_offset))
@@ -59,7 +59,14 @@ static int a10_reset_init(struct device_node *np)
data->rcdev.of_node = np;
data->status_active_low = true;
- return reset_controller_register(&data->rcdev);
+ ret = reset_controller_register(&data->rcdev);
+ if (ret)
+ pr_err("unable to register device\n");
+
+ return ret;
+
+release_region:
+ release_mem_region(res.start, size);
err_alloc:
kfree(data);
diff --git a/drivers/reset/reset-ti-syscon.c b/drivers/reset/reset-ti-syscon.c
index ef97c4dbbb4e..218370faf37b 100644
--- a/drivers/reset/reset-ti-syscon.c
+++ b/drivers/reset/reset-ti-syscon.c
@@ -89,7 +89,7 @@ static int ti_syscon_reset_assert(struct reset_controller_dev *rcdev,
mask = BIT(control->assert_bit);
value = (control->flags & ASSERT_SET) ? mask : 0x0;
- return regmap_update_bits(data->regmap, control->assert_offset, mask, value);
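+ /* regmap_write_bits() forces the write even if the value is unchanged */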
+ return regmap_write_bits(data->regmap, control->assert_offset, mask, value);
}
/**
@@ -120,7 +120,7 @@ static int ti_syscon_reset_deassert(struct reset_controller_dev *rcdev,
mask = BIT(control->deassert_bit);
value = (control->flags & DEASSERT_SET) ? mask : 0x0;
- return regmap_update_bits(data->regmap, control->deassert_offset, mask, value);
+ return regmap_write_bits(data->regmap, control->deassert_offset, mask, value);
}
/**
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 36452bed86ef..8fa43a2d17e7 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -4,7 +4,7 @@
#
obj-$(CONFIG_ARCH_ACTIONS) += actions/
-obj-$(CONFIG_SOC_ASPEED) += aspeed/
+obj-y += aspeed/
obj-$(CONFIG_ARCH_AT91) += atmel/
obj-y += bcm/
obj-$(CONFIG_ARCH_DOVE) += dove/
diff --git a/drivers/soc/amlogic/Kconfig b/drivers/soc/amlogic/Kconfig
index 321c5e26a268..174a9b011461 100644
--- a/drivers/soc/amlogic/Kconfig
+++ b/drivers/soc/amlogic/Kconfig
@@ -9,7 +9,7 @@ config MESON_CANVAS
Say yes to support the canvas IP for Amlogic SoCs.
config MESON_CLK_MEASURE
- bool "Amlogic Meson SoC Clock Measure driver"
+ tristate "Amlogic Meson SoC Clock Measure driver"
depends on ARCH_MESON || COMPILE_TEST
default ARCH_MESON
select REGMAP_MMIO
@@ -19,7 +19,7 @@ config MESON_CLK_MEASURE
config MESON_GX_SOCINFO
bool "Amlogic Meson GX SoC Information driver"
- depends on ARCH_MESON || COMPILE_TEST
+ depends on (ARM64 && ARCH_MESON) || COMPILE_TEST
default ARCH_MESON
select SOC_BUS
help
@@ -27,7 +27,7 @@ config MESON_GX_SOCINFO
information about the type, package and version.
config MESON_GX_PM_DOMAINS
- bool "Amlogic Meson GX Power Domains driver"
+ tristate "Amlogic Meson GX Power Domains driver"
depends on ARCH_MESON || COMPILE_TEST
depends on PM && OF
default ARCH_MESON
@@ -38,7 +38,7 @@ config MESON_GX_PM_DOMAINS
Generic Power Domains.
config MESON_EE_PM_DOMAINS
- bool "Amlogic Meson Everything-Else Power Domains driver"
+ tristate "Amlogic Meson Everything-Else Power Domains driver"
depends on ARCH_MESON || COMPILE_TEST
depends on PM && OF
default ARCH_MESON
@@ -49,7 +49,7 @@ config MESON_EE_PM_DOMAINS
Generic Power Domains.
config MESON_SECURE_PM_DOMAINS
- bool "Amlogic Meson Secure Power Domains driver"
+ tristate "Amlogic Meson Secure Power Domains driver"
depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM
depends on PM && OF
depends on HAVE_ARM_SMCCC
@@ -63,7 +63,7 @@ config MESON_SECURE_PM_DOMAINS
config MESON_MX_SOCINFO
bool "Amlogic Meson MX SoC Information driver"
- depends on ARCH_MESON || COMPILE_TEST
+ depends on (ARM && ARCH_MESON) || COMPILE_TEST
default ARCH_MESON
select SOC_BUS
help
diff --git a/drivers/soc/amlogic/meson-canvas.c b/drivers/soc/amlogic/meson-canvas.c
index c655f5f92b12..d0329ad170d1 100644
--- a/drivers/soc/amlogic/meson-canvas.c
+++ b/drivers/soc/amlogic/meson-canvas.c
@@ -72,8 +72,10 @@ struct meson_canvas *meson_canvas_get(struct device *dev)
* current state, this driver probe cannot return -EPROBE_DEFER
*/
canvas = dev_get_drvdata(&canvas_pdev->dev);
- if (!canvas)
+ if (!canvas) {
+ put_device(&canvas_pdev->dev);
return ERR_PTR(-EINVAL);
+ }
return canvas;
}
diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c
index 0fa47d77577d..e1957476a006 100644
--- a/drivers/soc/amlogic/meson-clk-measure.c
+++ b/drivers/soc/amlogic/meson-clk-measure.c
@@ -10,6 +10,7 @@
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/regmap.h>
+#include <linux/module.h>
static DEFINE_MUTEX(measure_lock);
@@ -681,6 +682,7 @@ static const struct of_device_id meson_msr_match_table[] = {
},
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, meson_msr_match_table);
static struct platform_driver meson_msr_driver = {
.probe = meson_msr_probe,
@@ -689,4 +691,5 @@ static struct platform_driver meson_msr_driver = {
.of_match_table = meson_msr_match_table,
},
};
-builtin_platform_driver(meson_msr_driver);
+module_platform_driver(meson_msr_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c
index 5164a4dc2352..50bf5d2b828b 100644
--- a/drivers/soc/amlogic/meson-ee-pwrc.c
+++ b/drivers/soc/amlogic/meson-ee-pwrc.c
@@ -14,6 +14,7 @@
#include <linux/reset-controller.h>
#include <linux/reset.h>
#include <linux/clk.h>
+#include <linux/module.h>
#include <dt-bindings/power/meson8-power.h>
#include <dt-bindings/power/meson-axg-power.h>
#include <dt-bindings/power/meson-g12a-power.h>
@@ -412,8 +413,7 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev,
dev_warn(&pdev->dev, "Invalid resets count %d for domain %s\n",
count, dom->desc.name);
- dom->rstc = devm_reset_control_array_get(&pdev->dev, false,
- false);
+ dom->rstc = devm_reset_control_array_get_exclusive(&pdev->dev);
if (IS_ERR(dom->rstc))
return PTR_ERR(dom->rstc);
}
@@ -602,6 +602,7 @@ static const struct of_device_id meson_ee_pwrc_match_table[] = {
},
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, meson_ee_pwrc_match_table);
static struct platform_driver meson_ee_pwrc_driver = {
.probe = meson_ee_pwrc_probe,
@@ -611,4 +612,5 @@ static struct platform_driver meson_ee_pwrc_driver = {
.of_match_table = meson_ee_pwrc_match_table,
},
};
-builtin_platform_driver(meson_ee_pwrc_driver);
+module_platform_driver(meson_ee_pwrc_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
index 21b4bc811c00..b4615b288625 100644
--- a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
+++ b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
@@ -14,6 +14,7 @@
#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/clk.h>
+#include <linux/module.h>
/* AO Offsets */
@@ -303,7 +304,7 @@ static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev)
return PTR_ERR(regmap_hhi);
}
- rstc = devm_reset_control_array_get(&pdev->dev, false, false);
+ rstc = devm_reset_control_array_get_exclusive(&pdev->dev);
if (IS_ERR(rstc)) {
if (PTR_ERR(rstc) != -EPROBE_DEFER)
dev_err(&pdev->dev, "failed to get reset lines\n");
@@ -364,6 +365,7 @@ static const struct of_device_id meson_gx_pwrc_vpu_match_table[] = {
},
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, meson_gx_pwrc_vpu_match_table);
static struct platform_driver meson_gx_pwrc_vpu_driver = {
.probe = meson_gx_pwrc_vpu_probe,
@@ -373,4 +375,5 @@ static struct platform_driver meson_gx_pwrc_vpu_driver = {
.of_match_table = meson_gx_pwrc_vpu_match_table,
},
};
-builtin_platform_driver(meson_gx_pwrc_vpu_driver);
+module_platform_driver(meson_gx_pwrc_vpu_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/amlogic/meson-secure-pwrc.c b/drivers/soc/amlogic/meson-secure-pwrc.c
index 5fb29a475879..59bd195fa9c9 100644
--- a/drivers/soc/amlogic/meson-secure-pwrc.c
+++ b/drivers/soc/amlogic/meson-secure-pwrc.c
@@ -13,6 +13,7 @@
#include <dt-bindings/power/meson-a1-power.h>
#include <linux/arm-smccc.h>
#include <linux/firmware/meson/meson_sm.h>
+#include <linux/module.h>
#define PWRC_ON 1
#define PWRC_OFF 0
@@ -193,6 +194,7 @@ static const struct of_device_id meson_secure_pwrc_match_table[] = {
},
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, meson_secure_pwrc_match_table);
static struct platform_driver meson_secure_pwrc_driver = {
.probe = meson_secure_pwrc_probe,
@@ -201,4 +203,5 @@ static struct platform_driver meson_secure_pwrc_driver = {
.of_match_table = meson_secure_pwrc_match_table,
},
};
-builtin_platform_driver(meson_secure_pwrc_driver);
+module_platform_driver(meson_secure_pwrc_driver);
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/soc/aspeed/Kconfig b/drivers/soc/aspeed/Kconfig
index c95fa30f1a76..243ca196e6ad 100644
--- a/drivers/soc/aspeed/Kconfig
+++ b/drivers/soc/aspeed/Kconfig
@@ -1,32 +1,47 @@
# SPDX-License-Identifier: GPL-2.0-only
-menu "Aspeed SoC drivers"
-config SOC_ASPEED
- def_bool y
- depends on ARCH_ASPEED || COMPILE_TEST
+if ARCH_ASPEED || COMPILE_TEST
+
+menu "ASPEED SoC drivers"
config ASPEED_LPC_CTRL
- depends on SOC_ASPEED && REGMAP && MFD_SYSCON
- tristate "Aspeed ast2400/2500 HOST LPC to BMC bridge control"
+ tristate "ASPEED LPC firmware cycle control"
+ select REGMAP
+ select MFD_SYSCON
+ default ARCH_ASPEED
help
- Control Aspeed ast2400/2500 HOST LPC to BMC mappings through
- ioctl()s, the driver also provides a read/write interface to a BMC ram
- region where the host LPC read/write region can be buffered.
+ Control LPC firmware cycle mappings through ioctl()s. The driver
+ also provides a read/write interface to a BMC ram region where the
+ host LPC read/write region can be buffered.
config ASPEED_LPC_SNOOP
- tristate "Aspeed ast2500 HOST LPC snoop support"
- depends on SOC_ASPEED && REGMAP && MFD_SYSCON
+ tristate "ASPEED LPC snoop support"
+ select REGMAP
+ select MFD_SYSCON
+ default ARCH_ASPEED
help
Provides a driver to control the LPC snoop interface which
allows the BMC to listen on and save the data written by
the host to an arbitrary LPC I/O port.
config ASPEED_P2A_CTRL
- depends on SOC_ASPEED && REGMAP && MFD_SYSCON
- tristate "Aspeed ast2400/2500 HOST P2A VGA MMIO to BMC bridge control"
+ tristate "ASPEED P2A (VGA MMIO to BMC) bridge control"
+ select REGMAP
+ select MFD_SYSCON
+ default ARCH_ASPEED
+ help
+ Control ASPEED P2A VGA MMIO to BMC mappings through ioctl()s. The
+ driver also provides an interface for userspace mappings to a
+ pre-defined region.
+
+config ASPEED_SOCINFO
+ bool "ASPEED SoC Information driver"
+ default ARCH_ASPEED
+ select SOC_BUS
help
- Control Aspeed ast2400/2500 HOST P2A VGA MMIO to BMC mappings through
- ioctl()s, the driver also provides an interface for userspace mappings to
- a pre-defined region.
+ Say yes to support decoding of ASPEED BMC information.
endmenu
+
+endif
diff --git a/drivers/soc/aspeed/Makefile b/drivers/soc/aspeed/Makefile
index b64be47f2b1f..fcab7192e1a4 100644
--- a/drivers/soc/aspeed/Makefile
+++ b/drivers/soc/aspeed/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
obj-$(CONFIG_ASPEED_P2A_CTRL) += aspeed-p2a-ctrl.o
+obj-$(CONFIG_ASPEED_SOCINFO) += aspeed-socinfo.o
diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
index 01ed21e8bfee..439bcd6b8c4a 100644
--- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c
+++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk.h>
+#include <linux/log2.h>
#include <linux/mfd/syscon.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
@@ -21,6 +22,9 @@
#define HICR5_ENL2H BIT(8)
#define HICR5_ENFWH BIT(10)
+#define HICR6 0x4
+#define SW_FWH2AHB BIT(17)
+
#define HICR7 0x8
#define HICR8 0xc
@@ -30,8 +34,9 @@ struct aspeed_lpc_ctrl {
struct clk *clk;
phys_addr_t mem_base;
resource_size_t mem_size;
- u32 pnor_size;
- u32 pnor_base;
+ u32 pnor_size;
+ u32 pnor_base;
+ bool fwh2ahb;
};
static struct aspeed_lpc_ctrl *file_aspeed_lpc_ctrl(struct file *file)
@@ -177,6 +182,16 @@ static long aspeed_lpc_ctrl_ioctl(struct file *file, unsigned int cmd,
return rc;
/*
+ * Switch to FWH2AHB mode, AST2600 only.
+ *
+ * The other bits in this register are interrupt status bits
+ * that are cleared by writing 1. As we don't want to clear
+ * them, set only the bit of interest.
+ */
+ if (lpc_ctrl->fwh2ahb)
+ regmap_write(lpc_ctrl->regmap, HICR6, SW_FWH2AHB);
+
+ /*
* Enable LPC FHW cycles. This is required for the host to
* access the regions specified.
*/
@@ -241,6 +256,18 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
lpc_ctrl->mem_size = resource_size(&resm);
lpc_ctrl->mem_base = resm.start;
+
+ if (!is_power_of_2(lpc_ctrl->mem_size)) {
+ dev_err(dev, "Reserved memory size must be a power of 2, got %u\n",
+ (unsigned int)lpc_ctrl->mem_size);
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(lpc_ctrl->mem_base, lpc_ctrl->mem_size)) {
+ dev_err(dev, "Reserved memory must be naturally aligned for size %u\n",
+ (unsigned int)lpc_ctrl->mem_size);
+ return -EINVAL;
+ }
}
lpc_ctrl->regmap = syscon_node_to_regmap(
@@ -261,6 +288,9 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
return rc;
}
+ if (of_device_is_compatible(dev->of_node, "aspeed,ast2600-lpc-ctrl"))
+ lpc_ctrl->fwh2ahb = true;
+
lpc_ctrl->miscdev.minor = MISC_DYNAMIC_MINOR;
lpc_ctrl->miscdev.name = DEVICE_NAME;
lpc_ctrl->miscdev.fops = &aspeed_lpc_ctrl_fops;
@@ -291,6 +321,7 @@ static int aspeed_lpc_ctrl_remove(struct platform_device *pdev)
static const struct of_device_id aspeed_lpc_ctrl_match[] = {
{ .compatible = "aspeed,ast2400-lpc-ctrl" },
{ .compatible = "aspeed,ast2500-lpc-ctrl" },
+ { .compatible = "aspeed,ast2600-lpc-ctrl" },
{ },
};
@@ -308,4 +339,4 @@ module_platform_driver(aspeed_lpc_ctrl_driver);
MODULE_DEVICE_TABLE(of, aspeed_lpc_ctrl_match);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cyril Bur <cyrilbur@gmail.com>");
-MODULE_DESCRIPTION("Control for aspeed 2400/2500 LPC HOST to BMC mappings");
+MODULE_DESCRIPTION("Control for ASPEED LPC HOST to BMC mappings");
diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
index f3d8d53ab84d..682ba0eb4eba 100644
--- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
+++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
@@ -325,6 +325,8 @@ static const struct of_device_id aspeed_lpc_snoop_match[] = {
.data = &ast2400_model_data },
{ .compatible = "aspeed,ast2500-lpc-snoop",
.data = &ast2500_model_data },
+ { .compatible = "aspeed,ast2600-lpc-snoop",
+ .data = &ast2500_model_data },
{ },
};
diff --git a/drivers/soc/aspeed/aspeed-socinfo.c b/drivers/soc/aspeed/aspeed-socinfo.c
new file mode 100644
index 000000000000..773930e0cb10
--- /dev/null
+++ b/drivers/soc/aspeed/aspeed-socinfo.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright 2019 IBM Corp. */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+static struct {
+ const char *name;
+ const u32 id;
+} const rev_table[] = {
+ /* AST2400 */
+ { "AST2400", 0x02000303 },
+ { "AST1400", 0x02010103 },
+ { "AST1250", 0x02010303 },
+ /* AST2500 */
+ { "AST2500", 0x04000303 },
+ { "AST2510", 0x04000103 },
+ { "AST2520", 0x04000203 },
+ { "AST2530", 0x04000403 },
+ /* AST2600 */
+ { "AST2600", 0x05000303 },
+ { "AST2620", 0x05010203 },
+};
+
+static const char *siliconid_to_name(u32 siliconid)
+{
+ unsigned int id = siliconid & 0xff00ffff;
+ unsigned int i;
+
+ for (i = 0 ; i < ARRAY_SIZE(rev_table) ; ++i) {
+ if (rev_table[i].id == id)
+ return rev_table[i].name;
+ }
+
+ return "Unknown";
+}
+
+static const char *siliconid_to_rev(u32 siliconid)
+{
+ unsigned int rev = (siliconid >> 16) & 0xff;
+
+ switch (rev) {
+ case 0:
+ return "A0";
+ case 1:
+ return "A1";
+ case 3:
+ return "A2";
+ }
+
+ return "??";
+}
+
+static int __init aspeed_socinfo_init(void)
+{
+ struct soc_device_attribute *attrs;
+ struct soc_device *soc_dev;
+ struct device_node *np;
+ void __iomem *reg;
+ bool has_chipid = false;
+ u32 siliconid;
+ u32 chipid[2];
+ const char *machine = NULL;
+
+ np = of_find_compatible_node(NULL, NULL, "aspeed,silicon-id");
+ if (!of_device_is_available(np)) {
+ of_node_put(np);
+ return -ENODEV;
+ }
+
+ reg = of_iomap(np, 0);
+ if (!reg) {
+ of_node_put(np);
+ return -ENODEV;
+ }
+ siliconid = readl(reg);
+ iounmap(reg);
+
+ /* This is optional, the ast2400 does not have it */
+ reg = of_iomap(np, 1);
+ if (reg) {
+ has_chipid = true;
+ chipid[0] = readl(reg);
+ chipid[1] = readl(reg + 4);
+ iounmap(reg);
+ }
+ of_node_put(np);
+
+ attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return -ENODEV;
+
+ /*
+ * Machine: Romulus BMC
+ * Family: AST2500
+ * Revision: A1
+ * SoC ID: raw silicon revision id
+ * Serial Number: 64-bit chipid
+ */
+
+ np = of_find_node_by_path("/");
+ of_property_read_string(np, "model", &machine);
+ if (machine)
+ attrs->machine = kstrdup(machine, GFP_KERNEL);
+ of_node_put(np);
+
+ attrs->family = siliconid_to_name(siliconid);
+ attrs->revision = siliconid_to_rev(siliconid);
+ attrs->soc_id = kasprintf(GFP_KERNEL, "%08x", siliconid);
+
+ if (has_chipid)
+ attrs->serial_number = kasprintf(GFP_KERNEL, "%08x%08x",
+ chipid[1], chipid[0]);
+
+ soc_dev = soc_device_register(attrs);
+ if (IS_ERR(soc_dev)) {
+ kfree(attrs->soc_id);
+ kfree(attrs->serial_number);
+ kfree(attrs);
+ return PTR_ERR(soc_dev);
+ }
+
+ pr_info("ASPEED %s rev %s (%s)\n",
+ attrs->family,
+ attrs->revision,
+ attrs->soc_id);
+
+ return 0;
+}
+early_initcall(aspeed_socinfo_init);
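As a worked example of the decoding above (the input value is chosen for illustration, not read from real hardware):

    u32 siliconid = 0x04010303;   /* hypothetical silicon-id register value */

    siliconid_to_name(siliconid); /* 0x04010303 & 0xff00ffff == 0x04000303 -> "AST2500" */
    siliconid_to_rev(siliconid);  /* (0x04010303 >> 16) & 0xff == 1        -> "A1"      */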
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
index 55a1f57a4d8c..c4472b68b7c2 100644
--- a/drivers/soc/atmel/soc.c
+++ b/drivers/soc/atmel/soc.c
@@ -69,6 +69,12 @@ static const struct at91_soc __initconst socs[] = {
#endif
#ifdef CONFIG_SOC_SAM9X60
AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_EXID_MATCH, "sam9x60", "sam9x60"),
+ AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_D5M_EXID_MATCH,
+ "sam9x60 64MiB DDR2 SiP", "sam9x60"),
+ AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_D1G_EXID_MATCH,
+ "sam9x60 128MiB DDR2 SiP", "sam9x60"),
+ AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_D6K_EXID_MATCH,
+ "sam9x60 8MiB SDRAM SiP", "sam9x60"),
#endif
#ifdef CONFIG_SOC_SAMA5
AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D21CU_EXID_MATCH,
diff --git a/drivers/soc/atmel/soc.h b/drivers/soc/atmel/soc.h
index ee652e4841a5..5849846a69d6 100644
--- a/drivers/soc/atmel/soc.h
+++ b/drivers/soc/atmel/soc.h
@@ -60,6 +60,9 @@ at91_soc_init(const struct at91_soc *socs);
#define AT91SAM9CN11_EXID_MATCH 0x00000009
#define SAM9X60_EXID_MATCH 0x00000000
+#define SAM9X60_D5M_EXID_MATCH 0x00000001
+#define SAM9X60_D1G_EXID_MATCH 0x00000010
+#define SAM9X60_D6K_EXID_MATCH 0x00000011
#define AT91SAM9XE128_CIDR_MATCH 0x329973a0
#define AT91SAM9XE256_CIDR_MATCH 0x329a93a0
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
index b1062334e608..a673fdffe216 100644
--- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c
+++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
@@ -111,6 +111,8 @@ enum bsp_initiate_command {
static struct brcmstb_pm_control ctrl;
+noinline int brcmstb_pm_s3_finish(void);
+
static int (*brcmstb_pm_do_s2_sram)(void __iomem *aon_ctrl_base,
void __iomem *ddr_phy_pll_status);
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index 659b4a570d5b..f13da4d7d1c5 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -424,7 +424,7 @@ int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
/**
* qbman_swp_interrupt_set_inhibit() - write interrupt mask register
* @p: the given software portal object
- * @mask: The mask to set in SWP_IIR register
+ * @inhibit: whether to inhibit the IRQs
*/
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
@@ -510,7 +510,7 @@ enum qb_enqueue_commands {
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7
-/**
+/*
* qbman_eq_desc_clear() - Clear the contents of a descriptor to
* default/starting state.
*/
@@ -522,7 +522,7 @@ void qbman_eq_desc_clear(struct qbman_eq_desc *d)
/**
* qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
* @d: the enqueue descriptor.
- * @response_success: 1 = enqueue with response always; 0 = enqueue with
+ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
* rejections returned on a FQ.
*/
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
@@ -932,7 +932,7 @@ int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
/**
* qbman_swp_push_get() - Get the push dequeue setup
- * @p: the software portal object
+ * @s: the software portal object
* @channel_idx: the channel index to query
* @enabled: returned boolean to show whether the push dequeue is enabled
* for the given channel
@@ -947,7 +947,7 @@ void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
/**
* qbman_swp_push_set() - Enable or disable push dequeue
- * @p: the software portal object
+ * @s: the software portal object
* @channel_idx: the channel index (0 to 15)
* @enable: enable or disable push dequeue
*/
@@ -1046,6 +1046,7 @@ void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
/**
* qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
+ * @d: the pull dequeue descriptor to be set
* @fqid: the frame queue index of the given FQ
*/
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
@@ -1057,6 +1058,7 @@ void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
/**
* qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
+ * @d: the pull dequeue descriptor to be set
* @wqid: composed of channel id and wqid within the channel
* @dct: the dequeue command type
*/
@@ -1071,6 +1073,7 @@ void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
/**
* qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
* dequeues
+ * @d: the pull dequeue descriptor to be set
* @chid: the channel id to be dequeued
* @dct: the dequeue command type
*/
@@ -1398,6 +1401,7 @@ int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
/**
* qbman_release_desc_clear() - Clear the contents of a descriptor to
* default/starting state.
+ * @d: the pull dequeue descriptor to be cleared
*/
void qbman_release_desc_clear(struct qbman_release_desc *d)
{
@@ -1407,6 +1411,8 @@ void qbman_release_desc_clear(struct qbman_release_desc *d)
/**
* qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
+ * @d: the pull dequeue descriptor to be set
+ * @bpid: the bpid value to be set
*/
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
{
@@ -1416,6 +1422,8 @@ void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
/**
* qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
* interrupt source should be asserted after the release command is completed.
+ * @d: the pull dequeue descriptor to be set
+ * @enable: enable (1) or disable (0) value
*/
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
{
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 101def7dc73d..a1b9be1d105a 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -2622,7 +2622,7 @@ int qman_shutdown_fq(u32 fqid)
union qm_mc_command *mcc;
union qm_mc_result *mcr;
int orl_empty, drain = 0, ret = 0;
- u32 channel, wq, res;
+ u32 channel, res;
u8 state;
p = get_affine_portal();
@@ -2655,7 +2655,7 @@ int qman_shutdown_fq(u32 fqid)
DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
/* Need to store these since the MCR gets reused */
channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
- wq = qm_fqd_get_wq(&mcr->queryfq.fqd);
+ qm_fqd_get_wq(&mcr->queryfq.fqd);
if (channel < qm_channel_pool1) {
channel_portal = get_portal_for_channel(channel);
@@ -2697,7 +2697,6 @@ int qman_shutdown_fq(u32 fqid)
* to dequeue from the channel the FQ is scheduled on
*/
int found_fqrn = 0;
- u16 dequeue_wq = 0;
/* Flag that we need to drain FQ */
drain = 1;
@@ -2705,11 +2704,8 @@ int qman_shutdown_fq(u32 fqid)
if (channel >= qm_channel_pool1 &&
channel < qm_channel_pool1 + 15) {
/* Pool channel, enable the bit in the portal */
- dequeue_wq = (channel -
- qm_channel_pool1 + 1)<<4 | wq;
} else if (channel < qm_channel_pool1) {
/* Dedicated channel */
- dequeue_wq = wq;
} else {
dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
fqid, channel);
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
index 75075591f630..497a7e0fd027 100644
--- a/drivers/soc/fsl/qe/qe_common.c
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -231,7 +231,7 @@ EXPORT_SYMBOL(cpm_muram_offset);
/**
* cpm_muram_dma - turn a muram virtual address into a DMA address
- * @offset: virtual address from cpm_muram_addr() to convert
+ * @addr: virtual address from cpm_muram_addr() to convert
*/
dma_addr_t cpm_muram_dma(void __iomem *addr)
{
diff --git a/drivers/soc/fsl/rcpm.c b/drivers/soc/fsl/rcpm.c
index a093dbe6d2cb..4ace28cab314 100644
--- a/drivers/soc/fsl/rcpm.c
+++ b/drivers/soc/fsl/rcpm.c
@@ -2,7 +2,7 @@
//
// rcpm.c - Freescale QorIQ RCPM driver
//
-// Copyright 2019 NXP
+// Copyright 2019-2020 NXP
//
// Author: Ran Wang <ran.wang_1@nxp.com>
@@ -22,6 +22,28 @@ struct rcpm {
bool little_endian;
};
+#define SCFG_SPARECR8 0x051c
+
+static void copy_ippdexpcr1_setting(u32 val)
+{
+ struct device_node *np;
+ void __iomem *regs;
+ u32 reg_val;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,ls1021a-scfg");
+ if (!np)
+ return;
+
+ regs = of_iomap(np, 0);
+ if (!regs)
+ return;
+
+ reg_val = ioread32be(regs + SCFG_SPARECR8);
+ iowrite32be(val | reg_val, regs + SCFG_SPARECR8);
+
+ iounmap(regs);
+}
+
/**
* rcpm_pm_prepare - performs device-level tasks associated with power
* management, such as programming related to the wakeup source control.
@@ -90,6 +112,17 @@ static int rcpm_pm_prepare(struct device *dev)
tmp |= ioread32be(address);
iowrite32be(tmp, address);
}
+	/*
+	 * Workaround for errata A-008646 on SoC LS1021A:
+	 * reading the configuration register RCPM_IPPDEXPCR1
+	 * always returns zero, so mirror the value written to
+	 * IPPDEXPCR1 into SCFG_SPARECR8, from which it can be
+	 * read back later.
+	 */
+ if (dev_of_node(dev) && (i == 1))
+ if (of_device_is_compatible(np, "fsl,ls1021a-rcpm"))
+ copy_ippdexpcr1_setting(tmp);
}
return 0;
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 59a56cd790ec..fdd8bc08569e 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -17,6 +17,15 @@ config MTK_CMDQ
time limitation, such as updating display configuration during the
vblank.
+config MTK_DEVAPC
+ tristate "Mediatek Device APC Support"
+ help
+	  Say yes here to enable support for the MediaTek Device APC driver.
+	  This driver handles bus access violations: when the Device APC
+	  hardware catches an unexpected transaction, the violation
+	  information is logged for further analysis or countermeasures.
+
config MTK_INFRACFG
bool "MediaTek INFRACFG Support"
select REGMAP
@@ -44,9 +53,22 @@ config MTK_SCPSYS
Say yes here to add support for the MediaTek SCPSYS power domain
driver.
+config MTK_SCPSYS_PM_DOMAINS
+ bool "MediaTek SCPSYS generic power domain"
+ default ARCH_MEDIATEK
+ depends on PM
+ select PM_GENERIC_DOMAINS
+ select REGMAP
+ help
+	  Say y here to enable power domain support.
+	  To meet both high-performance and low-power requirements, the System
+	  Control Processor System (SCPSYS) handles several power management
+	  tasks in the system.
+
config MTK_MMSYS
bool "MediaTek MMSYS Support"
default ARCH_MEDIATEK
+ depends on HAS_IOMEM
help
Say yes here to add support for the MediaTek Multimedia
Subsystem (MMSYS).
diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile
index 01f9f873634a..b6908db534c2 100644
--- a/drivers/soc/mediatek/Makefile
+++ b/drivers/soc/mediatek/Makefile
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MTK_CMDQ) += mtk-cmdq-helper.o
+obj-$(CONFIG_MTK_DEVAPC) += mtk-devapc.o
obj-$(CONFIG_MTK_INFRACFG) += mtk-infracfg.o
obj-$(CONFIG_MTK_PMIC_WRAP) += mtk-pmic-wrap.o
obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o
+obj-$(CONFIG_MTK_SCPSYS_PM_DOMAINS) += mtk-pm-domains.o
obj-$(CONFIG_MTK_MMSYS) += mtk-mmsys.o
diff --git a/drivers/soc/mediatek/mt8173-pm-domains.h b/drivers/soc/mediatek/mt8173-pm-domains.h
new file mode 100644
index 000000000000..3e8ee5dabb43
--- /dev/null
+++ b/drivers/soc/mediatek/mt8173-pm-domains.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8173_PM_DOMAINS_H
+#define __SOC_MEDIATEK_MT8173_PM_DOMAINS_H
+
+#include "mtk-pm-domains.h"
+#include <dt-bindings/power/mt8173-power.h>
+
+/*
+ * MT8173 power domain support
+ */
+
+static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
+ [MT8173_POWER_DOMAIN_VDEC] = {
+ .sta_mask = PWR_STATUS_VDEC,
+ .ctl_offs = SPM_VDE_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8173_POWER_DOMAIN_VENC] = {
+ .sta_mask = PWR_STATUS_VENC,
+ .ctl_offs = SPM_VEN_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ },
+ [MT8173_POWER_DOMAIN_ISP] = {
+ .sta_mask = PWR_STATUS_ISP,
+ .ctl_offs = SPM_ISP_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ },
+ [MT8173_POWER_DOMAIN_MM] = {
+ .sta_mask = PWR_STATUS_DISP,
+ .ctl_offs = SPM_DIS_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_UPDATE_TOPAXI(MT8173_TOP_AXI_PROT_EN_MM_M0 |
+ MT8173_TOP_AXI_PROT_EN_MM_M1),
+ },
+ },
+ [MT8173_POWER_DOMAIN_VENC_LT] = {
+ .sta_mask = PWR_STATUS_VENC_LT,
+ .ctl_offs = SPM_VEN2_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ },
+ [MT8173_POWER_DOMAIN_AUDIO] = {
+ .sta_mask = PWR_STATUS_AUDIO,
+ .ctl_offs = SPM_AUDIO_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ },
+ [MT8173_POWER_DOMAIN_USB] = {
+ .sta_mask = PWR_STATUS_USB,
+ .ctl_offs = SPM_USB_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT8173_POWER_DOMAIN_MFG_ASYNC] = {
+ .sta_mask = PWR_STATUS_MFG_ASYNC,
+ .ctl_offs = SPM_MFG_ASYNC_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = 0,
+ },
+ [MT8173_POWER_DOMAIN_MFG_2D] = {
+ .sta_mask = PWR_STATUS_MFG_2D,
+ .ctl_offs = SPM_MFG_2D_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ },
+ [MT8173_POWER_DOMAIN_MFG] = {
+ .sta_mask = PWR_STATUS_MFG,
+ .ctl_offs = SPM_MFG_PWR_CON,
+ .sram_pdn_bits = GENMASK(13, 8),
+ .sram_pdn_ack_bits = GENMASK(21, 16),
+ .bp_infracfg = {
+ BUS_PROT_UPDATE_TOPAXI(MT8173_TOP_AXI_PROT_EN_MFG_S |
+ MT8173_TOP_AXI_PROT_EN_MFG_M0 |
+ MT8173_TOP_AXI_PROT_EN_MFG_M1 |
+ MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT),
+ },
+ },
+};
+
+static const struct scpsys_soc_data mt8173_scpsys_data = {
+ .domains_data = scpsys_domain_data_mt8173,
+ .num_domains = ARRAY_SIZE(scpsys_domain_data_mt8173),
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
+};
+
+#endif /* __SOC_MEDIATEK_MT8173_PM_DOMAINS_H */
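A quick reference for the GENMASK() values used throughout these domain tables: GENMASK(h, l) sets bits h down to l inclusive, so the SRAM power-down fields expand as below (reference sketch only, not new driver code):

    #include <linux/bits.h>

    static const u32 sram_pdn     = GENMASK(11, 8);   /* 0x00000f00, four request bits     */
    static const u32 sram_pdn_ack = GENMASK(15, 12);  /* 0x0000f000, four acknowledge bits */
    static const u32 single_ack   = GENMASK(12, 12);  /* 0x00001000, equivalent to BIT(12) */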
diff --git a/drivers/soc/mediatek/mt8183-pm-domains.h b/drivers/soc/mediatek/mt8183-pm-domains.h
new file mode 100644
index 000000000000..8d996c5d2682
--- /dev/null
+++ b/drivers/soc/mediatek/mt8183-pm-domains.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8183_PM_DOMAINS_H
+#define __SOC_MEDIATEK_MT8183_PM_DOMAINS_H
+
+#include "mtk-pm-domains.h"
+#include <dt-bindings/power/mt8183-power.h>
+
+/*
+ * MT8183 power domain support
+ */
+
+static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
+ [MT8183_POWER_DOMAIN_AUDIO] = {
+ .sta_mask = PWR_STATUS_AUDIO,
+ .ctl_offs = 0x0314,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ },
+ [MT8183_POWER_DOMAIN_CONN] = {
+ .sta_mask = PWR_STATUS_CONN,
+ .ctl_offs = 0x032c,
+ .sram_pdn_bits = 0,
+ .sram_pdn_ack_bits = 0,
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_CONN, MT8183_TOP_AXI_PROT_EN_SET,
+ MT8183_TOP_AXI_PROT_EN_CLR, MT8183_TOP_AXI_PROT_EN_STA1),
+ },
+ },
+ [MT8183_POWER_DOMAIN_MFG_ASYNC] = {
+ .sta_mask = PWR_STATUS_MFG_ASYNC,
+ .ctl_offs = 0x0334,
+ .sram_pdn_bits = 0,
+ .sram_pdn_ack_bits = 0,
+ },
+ [MT8183_POWER_DOMAIN_MFG] = {
+ .sta_mask = PWR_STATUS_MFG,
+ .ctl_offs = 0x0338,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8183_POWER_DOMAIN_MFG_CORE0] = {
+ .sta_mask = BIT(7),
+ .ctl_offs = 0x034c,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8183_POWER_DOMAIN_MFG_CORE1] = {
+ .sta_mask = BIT(20),
+ .ctl_offs = 0x0310,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8183_POWER_DOMAIN_MFG_2D] = {
+ .sta_mask = PWR_STATUS_MFG_2D,
+ .ctl_offs = 0x0348,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_1_MFG, MT8183_TOP_AXI_PROT_EN_1_SET,
+ MT8183_TOP_AXI_PROT_EN_1_CLR, MT8183_TOP_AXI_PROT_EN_STA1_1),
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MFG, MT8183_TOP_AXI_PROT_EN_SET,
+ MT8183_TOP_AXI_PROT_EN_CLR, MT8183_TOP_AXI_PROT_EN_STA1),
+ },
+ },
+ [MT8183_POWER_DOMAIN_DISP] = {
+ .sta_mask = PWR_STATUS_DISP,
+ .ctl_offs = 0x030c,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_1_DISP, MT8183_TOP_AXI_PROT_EN_1_SET,
+ MT8183_TOP_AXI_PROT_EN_1_CLR, MT8183_TOP_AXI_PROT_EN_STA1_1),
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_DISP, MT8183_TOP_AXI_PROT_EN_SET,
+ MT8183_TOP_AXI_PROT_EN_CLR, MT8183_TOP_AXI_PROT_EN_STA1),
+ },
+ .bp_smi = {
+ BUS_PROT_WR(MT8183_SMI_COMMON_SMI_CLAMP_DISP,
+ MT8183_SMI_COMMON_CLAMP_EN_SET,
+ MT8183_SMI_COMMON_CLAMP_EN_CLR,
+ MT8183_SMI_COMMON_CLAMP_EN),
+ },
+ },
+ [MT8183_POWER_DOMAIN_CAM] = {
+ .sta_mask = BIT(25),
+ .ctl_offs = 0x0344,
+ .sram_pdn_bits = GENMASK(9, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MM_CAM, MT8183_TOP_AXI_PROT_EN_MM_SET,
+ MT8183_TOP_AXI_PROT_EN_MM_CLR, MT8183_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_CAM, MT8183_TOP_AXI_PROT_EN_SET,
+ MT8183_TOP_AXI_PROT_EN_CLR, MT8183_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR_IGN(MT8183_TOP_AXI_PROT_EN_MM_CAM_2ND,
+ MT8183_TOP_AXI_PROT_EN_MM_SET,
+ MT8183_TOP_AXI_PROT_EN_MM_CLR,
+ MT8183_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ .bp_smi = {
+ BUS_PROT_WR(MT8183_SMI_COMMON_SMI_CLAMP_CAM,
+ MT8183_SMI_COMMON_CLAMP_EN_SET,
+ MT8183_SMI_COMMON_CLAMP_EN_CLR,
+ MT8183_SMI_COMMON_CLAMP_EN),
+ },
+ },
+ [MT8183_POWER_DOMAIN_ISP] = {
+ .sta_mask = PWR_STATUS_ISP,
+ .ctl_offs = 0x0308,
+ .sram_pdn_bits = GENMASK(9, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MM_ISP,
+ MT8183_TOP_AXI_PROT_EN_MM_SET,
+ MT8183_TOP_AXI_PROT_EN_MM_CLR,
+ MT8183_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR_IGN(MT8183_TOP_AXI_PROT_EN_MM_ISP_2ND,
+ MT8183_TOP_AXI_PROT_EN_MM_SET,
+ MT8183_TOP_AXI_PROT_EN_MM_CLR,
+ MT8183_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ .bp_smi = {
+ BUS_PROT_WR(MT8183_SMI_COMMON_SMI_CLAMP_ISP,
+ MT8183_SMI_COMMON_CLAMP_EN_SET,
+ MT8183_SMI_COMMON_CLAMP_EN_CLR,
+ MT8183_SMI_COMMON_CLAMP_EN),
+ },
+ },
+ [MT8183_POWER_DOMAIN_VDEC] = {
+ .sta_mask = BIT(31),
+ .ctl_offs = 0x0300,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_smi = {
+ BUS_PROT_WR(MT8183_SMI_COMMON_SMI_CLAMP_VDEC,
+ MT8183_SMI_COMMON_CLAMP_EN_SET,
+ MT8183_SMI_COMMON_CLAMP_EN_CLR,
+ MT8183_SMI_COMMON_CLAMP_EN),
+ },
+ },
+ [MT8183_POWER_DOMAIN_VENC] = {
+ .sta_mask = PWR_STATUS_VENC,
+ .ctl_offs = 0x0304,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .bp_smi = {
+ BUS_PROT_WR(MT8183_SMI_COMMON_SMI_CLAMP_VENC,
+ MT8183_SMI_COMMON_CLAMP_EN_SET,
+ MT8183_SMI_COMMON_CLAMP_EN_CLR,
+ MT8183_SMI_COMMON_CLAMP_EN),
+ },
+ },
+ [MT8183_POWER_DOMAIN_VPU_TOP] = {
+ .sta_mask = BIT(26),
+ .ctl_offs = 0x0324,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MM_VPU_TOP,
+ MT8183_TOP_AXI_PROT_EN_MM_SET,
+ MT8183_TOP_AXI_PROT_EN_MM_CLR,
+ MT8183_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_VPU_TOP,
+ MT8183_TOP_AXI_PROT_EN_SET,
+ MT8183_TOP_AXI_PROT_EN_CLR,
+ MT8183_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MM_VPU_TOP_2ND,
+ MT8183_TOP_AXI_PROT_EN_MM_SET,
+ MT8183_TOP_AXI_PROT_EN_MM_CLR,
+ MT8183_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ .bp_smi = {
+ BUS_PROT_WR(MT8183_SMI_COMMON_SMI_CLAMP_VPU_TOP,
+ MT8183_SMI_COMMON_CLAMP_EN_SET,
+ MT8183_SMI_COMMON_CLAMP_EN_CLR,
+ MT8183_SMI_COMMON_CLAMP_EN),
+ },
+ },
+ [MT8183_POWER_DOMAIN_VPU_CORE0] = {
+ .sta_mask = BIT(27),
+ .ctl_offs = 0x33c,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE0,
+ MT8183_TOP_AXI_PROT_EN_MCU_SET,
+ MT8183_TOP_AXI_PROT_EN_MCU_CLR,
+ MT8183_TOP_AXI_PROT_EN_MCU_STA1),
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE0_2ND,
+ MT8183_TOP_AXI_PROT_EN_MCU_SET,
+ MT8183_TOP_AXI_PROT_EN_MCU_CLR,
+ MT8183_TOP_AXI_PROT_EN_MCU_STA1),
+ },
+ .caps = MTK_SCPD_SRAM_ISO,
+ },
+ [MT8183_POWER_DOMAIN_VPU_CORE1] = {
+ .sta_mask = BIT(28),
+ .ctl_offs = 0x0340,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE1,
+ MT8183_TOP_AXI_PROT_EN_MCU_SET,
+ MT8183_TOP_AXI_PROT_EN_MCU_CLR,
+ MT8183_TOP_AXI_PROT_EN_MCU_STA1),
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE1_2ND,
+ MT8183_TOP_AXI_PROT_EN_MCU_SET,
+ MT8183_TOP_AXI_PROT_EN_MCU_CLR,
+ MT8183_TOP_AXI_PROT_EN_MCU_STA1),
+ },
+ .caps = MTK_SCPD_SRAM_ISO,
+ },
+};
+
+static const struct scpsys_soc_data mt8183_scpsys_data = {
+ .domains_data = scpsys_domain_data_mt8183,
+ .num_domains = ARRAY_SIZE(scpsys_domain_data_mt8183),
+ .pwr_sta_offs = 0x0180,
+ .pwr_sta2nd_offs = 0x0184
+};
+
+#endif /* __SOC_MEDIATEK_MT8183_PM_DOMAINS_H */
diff --git a/drivers/soc/mediatek/mt8192-pm-domains.h b/drivers/soc/mediatek/mt8192-pm-domains.h
new file mode 100644
index 000000000000..0fdf6dc6231f
--- /dev/null
+++ b/drivers/soc/mediatek/mt8192-pm-domains.h
@@ -0,0 +1,292 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8192_PM_DOMAINS_H
+#define __SOC_MEDIATEK_MT8192_PM_DOMAINS_H
+
+#include "mtk-pm-domains.h"
+#include <dt-bindings/power/mt8192-power.h>
+
+/*
+ * MT8192 power domain support
+ */
+
+static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ [MT8192_POWER_DOMAIN_AUDIO] = {
+ .sta_mask = BIT(21),
+ .ctl_offs = 0x0354,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_2_AUDIO,
+ MT8192_TOP_AXI_PROT_EN_2_SET,
+ MT8192_TOP_AXI_PROT_EN_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_2_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_CONN] = {
+ .sta_mask = PWR_STATUS_CONN,
+ .ctl_offs = 0x0304,
+ .sram_pdn_bits = 0,
+ .sram_pdn_ack_bits = 0,
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_CONN,
+ MT8192_TOP_AXI_PROT_EN_SET,
+ MT8192_TOP_AXI_PROT_EN_CLR,
+ MT8192_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_CONN_2ND,
+ MT8192_TOP_AXI_PROT_EN_SET,
+ MT8192_TOP_AXI_PROT_EN_CLR,
+ MT8192_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_1_CONN,
+ MT8192_TOP_AXI_PROT_EN_1_SET,
+ MT8192_TOP_AXI_PROT_EN_1_CLR,
+ MT8192_TOP_AXI_PROT_EN_1_STA1),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8192_POWER_DOMAIN_MFG0] = {
+ .sta_mask = BIT(2),
+ .ctl_offs = 0x0308,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_MFG1] = {
+ .sta_mask = BIT(3),
+ .ctl_offs = 0x030c,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_1_MFG1,
+ MT8192_TOP_AXI_PROT_EN_1_SET,
+ MT8192_TOP_AXI_PROT_EN_1_CLR,
+ MT8192_TOP_AXI_PROT_EN_1_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_2_MFG1,
+ MT8192_TOP_AXI_PROT_EN_2_SET,
+ MT8192_TOP_AXI_PROT_EN_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_2_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MFG1,
+ MT8192_TOP_AXI_PROT_EN_SET,
+ MT8192_TOP_AXI_PROT_EN_CLR,
+ MT8192_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_2_MFG1_2ND,
+ MT8192_TOP_AXI_PROT_EN_2_SET,
+ MT8192_TOP_AXI_PROT_EN_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_2_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_MFG2] = {
+ .sta_mask = BIT(4),
+ .ctl_offs = 0x0310,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_MFG3] = {
+ .sta_mask = BIT(5),
+ .ctl_offs = 0x0314,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_MFG4] = {
+ .sta_mask = BIT(6),
+ .ctl_offs = 0x0318,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_MFG5] = {
+ .sta_mask = BIT(7),
+ .ctl_offs = 0x031c,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_MFG6] = {
+ .sta_mask = BIT(8),
+ .ctl_offs = 0x0320,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_DISP] = {
+ .sta_mask = BIT(20),
+ .ctl_offs = 0x0350,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR_IGN(MT8192_TOP_AXI_PROT_EN_MM_DISP,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR_IGN(MT8192_TOP_AXI_PROT_EN_MM_2_DISP,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_DISP,
+ MT8192_TOP_AXI_PROT_EN_SET,
+ MT8192_TOP_AXI_PROT_EN_CLR,
+ MT8192_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_DISP_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_2_DISP_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_IPE] = {
+ .sta_mask = BIT(14),
+ .ctl_offs = 0x0338,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_IPE,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_IPE_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_ISP] = {
+ .sta_mask = BIT(12),
+ .ctl_offs = 0x0330,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_2_ISP,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_2_ISP_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_ISP2] = {
+ .sta_mask = BIT(13),
+ .ctl_offs = 0x0334,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_ISP2,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_ISP2_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_MDP] = {
+ .sta_mask = BIT(19),
+ .ctl_offs = 0x034c,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_2_MDP,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_2_MDP_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_VENC] = {
+ .sta_mask = BIT(17),
+ .ctl_offs = 0x0344,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_VENC,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_VENC_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_VDEC] = {
+ .sta_mask = BIT(15),
+ .ctl_offs = 0x033c,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_VDEC,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_VDEC_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_VDEC2] = {
+ .sta_mask = BIT(16),
+ .ctl_offs = 0x0340,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_CAM] = {
+ .sta_mask = BIT(23),
+ .ctl_offs = 0x035c,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_2_CAM,
+ MT8192_TOP_AXI_PROT_EN_2_SET,
+ MT8192_TOP_AXI_PROT_EN_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_2_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_CAM,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_1_CAM,
+ MT8192_TOP_AXI_PROT_EN_1_SET,
+ MT8192_TOP_AXI_PROT_EN_1_CLR,
+ MT8192_TOP_AXI_PROT_EN_1_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_CAM_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_VDNR_CAM,
+ MT8192_TOP_AXI_PROT_EN_VDNR_SET,
+ MT8192_TOP_AXI_PROT_EN_VDNR_CLR,
+ MT8192_TOP_AXI_PROT_EN_VDNR_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_CAM_RAWA] = {
+ .sta_mask = BIT(24),
+ .ctl_offs = 0x0360,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_CAM_RAWB] = {
+ .sta_mask = BIT(25),
+ .ctl_offs = 0x0364,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_CAM_RAWC] = {
+ .sta_mask = BIT(26),
+ .ctl_offs = 0x0368,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+};
+
+static const struct scpsys_soc_data mt8192_scpsys_data = {
+ .domains_data = scpsys_domain_data_mt8192,
+ .num_domains = ARRAY_SIZE(scpsys_domain_data_mt8192),
+ .pwr_sta_offs = 0x016c,
+ .pwr_sta2nd_offs = 0x0170,
+};
+
+#endif /* __SOC_MEDIATEK_MT8192_PM_DOMAINS_H */
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
index 505651b0d715..280d3bd9f675 100644
--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -70,14 +70,7 @@ int cmdq_dev_get_client_reg(struct device *dev,
}
EXPORT_SYMBOL(cmdq_dev_get_client_reg);
-static void cmdq_client_timeout(struct timer_list *t)
-{
- struct cmdq_client *client = from_timer(client, t, timer);
-
- dev_err(client->client.dev, "cmdq timeout!\n");
-}
-
-struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
+struct cmdq_client *cmdq_mbox_create(struct device *dev, int index)
{
struct cmdq_client *client;
@@ -85,12 +78,6 @@ struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
if (!client)
return (struct cmdq_client *)-ENOMEM;
- client->timeout_ms = timeout;
- if (timeout != CMDQ_NO_TIMEOUT) {
- spin_lock_init(&client->lock);
- timer_setup(&client->timer, cmdq_client_timeout, 0);
- }
- client->pkt_cnt = 0;
client->client.dev = dev;
client->client.tx_block = false;
client->client.knows_txdone = true;
@@ -112,11 +99,6 @@ EXPORT_SYMBOL(cmdq_mbox_create);
void cmdq_mbox_destroy(struct cmdq_client *client)
{
- if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
- spin_lock(&client->lock);
- del_timer_sync(&client->timer);
- spin_unlock(&client->lock);
- }
mbox_free_channel(client->chan);
kfree(client);
}
@@ -449,18 +431,6 @@ static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
struct cmdq_task_cb *cb = &pkt->cb;
struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
- if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
- unsigned long flags = 0;
-
- spin_lock_irqsave(&client->lock, flags);
- if (--client->pkt_cnt == 0)
- del_timer(&client->timer);
- else
- mod_timer(&client->timer, jiffies +
- msecs_to_jiffies(client->timeout_ms));
- spin_unlock_irqrestore(&client->lock, flags);
- }
-
dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
pkt->cmd_buf_size, DMA_TO_DEVICE);
if (cb->cb) {
@@ -473,7 +443,6 @@ int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
void *data)
{
int err;
- unsigned long flags = 0;
struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
pkt->cb.cb = cb;
@@ -484,14 +453,6 @@ int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
pkt->cmd_buf_size, DMA_TO_DEVICE);
- if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
- spin_lock_irqsave(&client->lock, flags);
- if (client->pkt_cnt++ == 0)
- mod_timer(&client->timer, jiffies +
- msecs_to_jiffies(client->timeout_ms));
- spin_unlock_irqrestore(&client->lock, flags);
- }
-
err = mbox_send_message(client->chan, pkt);
if (err < 0)
return err;
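With the per-client timeout handling removed, cmdq_mbox_create() loses its third argument. A hedged sketch of what an updated caller might look like; only the new two-argument signature comes from the patch, the surrounding error handling is illustrative:

    struct cmdq_client *client;

    client = cmdq_mbox_create(dev, 0);      /* was: cmdq_mbox_create(dev, 0, timeout) */
    if (IS_ERR(client))
    	return PTR_ERR(client);

    /* ... build packets and submit them with cmdq_pkt_flush_async() ... */

    cmdq_mbox_destroy(client);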
diff --git a/drivers/soc/mediatek/mtk-devapc.c b/drivers/soc/mediatek/mtk-devapc.c
new file mode 100644
index 000000000000..f1cea041dc5a
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-devapc.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 MediaTek Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#define VIO_MOD_TO_REG_IND(m) ((m) / 32)
+#define VIO_MOD_TO_REG_OFF(m) ((m) % 32)
+
+struct mtk_devapc_vio_dbgs {
+ union {
+ u32 vio_dbg0;
+ struct {
+ u32 mstid:16;
+ u32 dmnid:6;
+ u32 vio_w:1;
+ u32 vio_r:1;
+ u32 addr_h:4;
+ u32 resv:4;
+ } dbg0_bits;
+ };
+
+ u32 vio_dbg1;
+};
+
+struct mtk_devapc_data {
+ /* numbers of violation index */
+ u32 vio_idx_num;
+
+ /* reg offset */
+ u32 vio_mask_offset;
+ u32 vio_sta_offset;
+ u32 vio_dbg0_offset;
+ u32 vio_dbg1_offset;
+ u32 apc_con_offset;
+ u32 vio_shift_sta_offset;
+ u32 vio_shift_sel_offset;
+ u32 vio_shift_con_offset;
+};
+
+struct mtk_devapc_context {
+ struct device *dev;
+ void __iomem *infra_base;
+ struct clk *infra_clk;
+ const struct mtk_devapc_data *data;
+};
+
+static void clear_vio_status(struct mtk_devapc_context *ctx)
+{
+ void __iomem *reg;
+ int i;
+
+ reg = ctx->infra_base + ctx->data->vio_sta_offset;
+
+ for (i = 0; i < VIO_MOD_TO_REG_IND(ctx->data->vio_idx_num) - 1; i++)
+ writel(GENMASK(31, 0), reg + 4 * i);
+
+ writel(GENMASK(VIO_MOD_TO_REG_OFF(ctx->data->vio_idx_num) - 1, 0),
+ reg + 4 * i);
+}
+
+static void mask_module_irq(struct mtk_devapc_context *ctx, bool mask)
+{
+ void __iomem *reg;
+ u32 val;
+ int i;
+
+ reg = ctx->infra_base + ctx->data->vio_mask_offset;
+
+ if (mask)
+ val = GENMASK(31, 0);
+ else
+ val = 0;
+
+ for (i = 0; i < VIO_MOD_TO_REG_IND(ctx->data->vio_idx_num) - 1; i++)
+ writel(val, reg + 4 * i);
+
+ val = readl(reg + 4 * i);
+ if (mask)
+ val |= GENMASK(VIO_MOD_TO_REG_OFF(ctx->data->vio_idx_num) - 1,
+ 0);
+ else
+ val &= ~GENMASK(VIO_MOD_TO_REG_OFF(ctx->data->vio_idx_num) - 1,
+ 0);
+
+ writel(val, reg + 4 * i);
+}
+
+#define PHY_DEVAPC_TIMEOUT 0x10000
+
+/*
+ * devapc_sync_vio_dbg - use the "shift" mechanism to get the full violation information.
+ *                       The shift mechanism depends on the devapc hardware design:
+ *                       MediaTek devapc groups multiple slaves together, and when a
+ *                       violation is triggered, the violation info is kept inside the
+ *                       devapc hardware. The driver has to perform the shift mechanism
+ *                       to sync the full violation info into the VIO_DBG registers.
+ */
+static int devapc_sync_vio_dbg(struct mtk_devapc_context *ctx)
+{
+ void __iomem *pd_vio_shift_sta_reg;
+ void __iomem *pd_vio_shift_sel_reg;
+ void __iomem *pd_vio_shift_con_reg;
+ int min_shift_group;
+ int ret;
+ u32 val;
+
+ pd_vio_shift_sta_reg = ctx->infra_base +
+ ctx->data->vio_shift_sta_offset;
+ pd_vio_shift_sel_reg = ctx->infra_base +
+ ctx->data->vio_shift_sel_offset;
+ pd_vio_shift_con_reg = ctx->infra_base +
+ ctx->data->vio_shift_con_offset;
+
+ /* Find the minimum shift group which has violation */
+ val = readl(pd_vio_shift_sta_reg);
+ if (!val)
+ return false;
+
+ min_shift_group = __ffs(val);
+
+ /* Assign the group to sync */
+ writel(0x1 << min_shift_group, pd_vio_shift_sel_reg);
+
+ /* Start syncing */
+ writel(0x1, pd_vio_shift_con_reg);
+
+ ret = readl_poll_timeout(pd_vio_shift_con_reg, val, val == 0x3, 0,
+ PHY_DEVAPC_TIMEOUT);
+ if (ret) {
+ dev_err(ctx->dev, "%s: Shift violation info failed\n", __func__);
+ return false;
+ }
+
+ /* Stop syncing */
+ writel(0x0, pd_vio_shift_con_reg);
+
+ /* Write clear */
+ writel(0x1 << min_shift_group, pd_vio_shift_sta_reg);
+
+ return true;
+}
+
+/*
+ * devapc_extract_vio_dbg - extract full violation information after doing
+ * shift mechanism.
+ */
+static void devapc_extract_vio_dbg(struct mtk_devapc_context *ctx)
+{
+ struct mtk_devapc_vio_dbgs vio_dbgs;
+ void __iomem *vio_dbg0_reg;
+ void __iomem *vio_dbg1_reg;
+
+ vio_dbg0_reg = ctx->infra_base + ctx->data->vio_dbg0_offset;
+ vio_dbg1_reg = ctx->infra_base + ctx->data->vio_dbg1_offset;
+
+ vio_dbgs.vio_dbg0 = readl(vio_dbg0_reg);
+ vio_dbgs.vio_dbg1 = readl(vio_dbg1_reg);
+
+ /* Print violation information */
+ if (vio_dbgs.dbg0_bits.vio_w)
+ dev_info(ctx->dev, "Write Violation\n");
+ else if (vio_dbgs.dbg0_bits.vio_r)
+ dev_info(ctx->dev, "Read Violation\n");
+
+ dev_info(ctx->dev, "Bus ID:0x%x, Dom ID:0x%x, Vio Addr:0x%x\n",
+ vio_dbgs.dbg0_bits.mstid, vio_dbgs.dbg0_bits.dmnid,
+ vio_dbgs.vio_dbg1);
+}
+
+/*
+ * devapc_violation_irq - the devapc Interrupt Service Routine (ISR) dumps the
+ *                        violation information, including which master issued
+ *                        the violating access to which slave.
+ */
+static irqreturn_t devapc_violation_irq(int irq_number, void *data)
+{
+ struct mtk_devapc_context *ctx = data;
+
+ while (devapc_sync_vio_dbg(ctx))
+ devapc_extract_vio_dbg(ctx);
+
+ clear_vio_status(ctx);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * start_devapc - unmask slave's irq to start receiving devapc violation.
+ */
+static void start_devapc(struct mtk_devapc_context *ctx)
+{
+ writel(BIT(31), ctx->infra_base + ctx->data->apc_con_offset);
+
+ mask_module_irq(ctx, false);
+}
+
+/*
+ * stop_devapc - mask slave's irq to stop service.
+ */
+static void stop_devapc(struct mtk_devapc_context *ctx)
+{
+ mask_module_irq(ctx, true);
+
+ writel(BIT(2), ctx->infra_base + ctx->data->apc_con_offset);
+}
+
+static const struct mtk_devapc_data devapc_mt6779 = {
+ .vio_idx_num = 511,
+ .vio_mask_offset = 0x0,
+ .vio_sta_offset = 0x400,
+ .vio_dbg0_offset = 0x900,
+ .vio_dbg1_offset = 0x904,
+ .apc_con_offset = 0xF00,
+ .vio_shift_sta_offset = 0xF10,
+ .vio_shift_sel_offset = 0xF14,
+ .vio_shift_con_offset = 0xF20,
+};
+
+static const struct of_device_id mtk_devapc_dt_match[] = {
+ {
+ .compatible = "mediatek,mt6779-devapc",
+ .data = &devapc_mt6779,
+ }, {
+ },
+};
+
+static int mtk_devapc_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct mtk_devapc_context *ctx;
+ u32 devapc_irq;
+ int ret;
+
+	if (!node)
+ return -ENODEV;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->data = of_device_get_match_data(&pdev->dev);
+ ctx->dev = &pdev->dev;
+
+ ctx->infra_base = of_iomap(node, 0);
+ if (!ctx->infra_base)
+ return -EINVAL;
+
+ devapc_irq = irq_of_parse_and_map(node, 0);
+ if (!devapc_irq)
+ return -EINVAL;
+
+ ctx->infra_clk = devm_clk_get(&pdev->dev, "devapc-infra-clock");
+ if (IS_ERR(ctx->infra_clk))
+ return -EINVAL;
+
+ if (clk_prepare_enable(ctx->infra_clk))
+ return -EINVAL;
+
+ ret = devm_request_irq(&pdev->dev, devapc_irq, devapc_violation_irq,
+ IRQF_TRIGGER_NONE, "devapc", ctx);
+ if (ret) {
+ clk_disable_unprepare(ctx->infra_clk);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, ctx);
+
+ start_devapc(ctx);
+
+ return 0;
+}
+
+static int mtk_devapc_remove(struct platform_device *pdev)
+{
+ struct mtk_devapc_context *ctx = platform_get_drvdata(pdev);
+
+ stop_devapc(ctx);
+
+ clk_disable_unprepare(ctx->infra_clk);
+
+ return 0;
+}
+
+static struct platform_driver mtk_devapc_driver = {
+ .probe = mtk_devapc_probe,
+ .remove = mtk_devapc_remove,
+ .driver = {
+ .name = "mtk-devapc",
+ .of_match_table = mtk_devapc_dt_match,
+ },
+};
+
+module_platform_driver(mtk_devapc_driver);
+
+MODULE_DESCRIPTION("Mediatek Device APC Driver");
+MODULE_AUTHOR("Neal Liu <neal.liu@mediatek.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/mediatek/mtk-infracfg.c b/drivers/soc/mediatek/mtk-infracfg.c
index 4a123796aad3..0590b68e0d78 100644
--- a/drivers/soc/mediatek/mtk-infracfg.c
+++ b/drivers/soc/mediatek/mtk-infracfg.c
@@ -12,11 +12,6 @@
#define MTK_POLL_DELAY_US 10
#define MTK_POLL_TIMEOUT (jiffies_to_usecs(HZ))
-#define INFRA_TOPAXI_PROTECTEN 0x0220
-#define INFRA_TOPAXI_PROTECTSTA1 0x0228
-#define INFRA_TOPAXI_PROTECTEN_SET 0x0260
-#define INFRA_TOPAXI_PROTECTEN_CLR 0x0264
-
/**
* mtk_infracfg_set_bus_protection - enable bus protection
* @infracfg: The infracfg regmap
diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c
index a55f25511173..18f93979e14a 100644
--- a/drivers/soc/mediatek/mtk-mmsys.c
+++ b/drivers/soc/mediatek/mtk-mmsys.c
@@ -5,13 +5,11 @@
*/
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
-#include "../../gpu/drm/mediatek/mtk_drm_ddp.h"
-#include "../../gpu/drm/mediatek/mtk_drm_ddp_comp.h"
-
#define DISP_REG_CONFIG_DISP_OVL0_MOUT_EN 0x040
#define DISP_REG_CONFIG_DISP_OVL1_MOUT_EN 0x044
#define DISP_REG_CONFIG_DISP_OD_MOUT_EN 0x048
@@ -308,15 +306,12 @@ static int mtk_mmsys_probe(struct platform_device *pdev)
struct platform_device *clks;
struct platform_device *drm;
void __iomem *config_regs;
- struct resource *mem;
int ret;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- config_regs = devm_ioremap_resource(dev, mem);
+ config_regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(config_regs)) {
ret = PTR_ERR(config_regs);
- dev_err(dev, "Failed to ioremap mmsys-config resource: %d\n",
- ret);
+ dev_err(dev, "Failed to ioremap mmsys registers: %d\n", ret);
return ret;
}
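The probe change above folds platform_get_resource() plus devm_ioremap_resource() into the single devm_platform_ioremap_resource() helper; the two forms are equivalent, as this before/after sketch (not meant as one compilable unit) shows:

    /* Before: fetch the resource, then map it. */
    struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    void __iomem *regs = devm_ioremap_resource(&pdev->dev, mem);

    /* After: one helper does both and returns an ERR_PTR on failure. */
    void __iomem *regs = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(regs))
    	return PTR_ERR(regs);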
diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
new file mode 100644
index 000000000000..fb70cb3b07b3
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-pm-domains.c
@@ -0,0 +1,614 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020 Collabora Ltd.
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_clk.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+#include <linux/soc/mediatek/infracfg.h>
+
+#include "mt8173-pm-domains.h"
+#include "mt8183-pm-domains.h"
+#include "mt8192-pm-domains.h"
+
+#define MTK_POLL_DELAY_US 10
+#define MTK_POLL_TIMEOUT USEC_PER_SEC
+
+#define PWR_RST_B_BIT BIT(0)
+#define PWR_ISO_BIT BIT(1)
+#define PWR_ON_BIT BIT(2)
+#define PWR_ON_2ND_BIT BIT(3)
+#define PWR_CLK_DIS_BIT BIT(4)
+#define PWR_SRAM_CLKISO_BIT BIT(5)
+#define PWR_SRAM_ISOINT_B_BIT BIT(6)
+
+struct scpsys_domain {
+ struct generic_pm_domain genpd;
+ const struct scpsys_domain_data *data;
+ struct scpsys *scpsys;
+ int num_clks;
+ struct clk_bulk_data *clks;
+ int num_subsys_clks;
+ struct clk_bulk_data *subsys_clks;
+ struct regmap *infracfg;
+ struct regmap *smi;
+};
+
+struct scpsys {
+ struct device *dev;
+ struct regmap *base;
+ const struct scpsys_soc_data *soc_data;
+ struct genpd_onecell_data pd_data;
+ struct generic_pm_domain *domains[];
+};
+
+#define to_scpsys_domain(gpd) container_of(gpd, struct scpsys_domain, genpd)
+
+static bool scpsys_domain_is_on(struct scpsys_domain *pd)
+{
+ struct scpsys *scpsys = pd->scpsys;
+ u32 status, status2;
+
+ regmap_read(scpsys->base, scpsys->soc_data->pwr_sta_offs, &status);
+ status &= pd->data->sta_mask;
+
+ regmap_read(scpsys->base, scpsys->soc_data->pwr_sta2nd_offs, &status2);
+ status2 &= pd->data->sta_mask;
+
+ /* A domain is on when both status bits are set. */
+ return status && status2;
+}
+
+static int scpsys_sram_enable(struct scpsys_domain *pd)
+{
+ u32 pdn_ack = pd->data->sram_pdn_ack_bits;
+ struct scpsys *scpsys = pd->scpsys;
+ unsigned int tmp;
+ int ret;
+
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
+
+ /* Wait until the SRAM_PDN_ACK bits are all cleared (SRAM powered up) */
+ ret = regmap_read_poll_timeout(scpsys->base, pd->data->ctl_offs, tmp,
+ (tmp & pdn_ack) == 0, MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+ if (ret < 0)
+ return ret;
+
+ if (MTK_SCPD_CAPS(pd, MTK_SCPD_SRAM_ISO)) {
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_ISOINT_B_BIT);
+ udelay(1);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_CLKISO_BIT);
+ }
+
+ return 0;
+}
+
+static int scpsys_sram_disable(struct scpsys_domain *pd)
+{
+ u32 pdn_ack = pd->data->sram_pdn_ack_bits;
+ struct scpsys *scpsys = pd->scpsys;
+ unsigned int tmp;
+
+ if (MTK_SCPD_CAPS(pd, MTK_SCPD_SRAM_ISO)) {
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_CLKISO_BIT);
+ udelay(1);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_ISOINT_B_BIT);
+ }
+
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
+
+ /* Wait until all SRAM_PDN_ACK bits are set (SRAM powered down) */
+ return regmap_read_poll_timeout(scpsys->base, pd->data->ctl_offs, tmp,
+ (tmp & pdn_ack) == pdn_ack, MTK_POLL_DELAY_US,
+ MTK_POLL_TIMEOUT);
+}
+
+static int _scpsys_bus_protect_enable(const struct scpsys_bus_prot_data *bpd, struct regmap *regmap)
+{
+ int i, ret;
+
+ for (i = 0; i < SPM_MAX_BUS_PROT_DATA; i++) {
+ u32 val, mask = bpd[i].bus_prot_mask;
+
+ if (!mask)
+ break;
+
+ if (bpd[i].bus_prot_reg_update)
+ regmap_set_bits(regmap, bpd[i].bus_prot_set, mask);
+ else
+ regmap_write(regmap, bpd[i].bus_prot_set, mask);
+
+ ret = regmap_read_poll_timeout(regmap, bpd[i].bus_prot_sta,
+ val, (val & mask) == mask,
+ MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int scpsys_bus_protect_enable(struct scpsys_domain *pd)
+{
+ int ret;
+
+ ret = _scpsys_bus_protect_enable(pd->data->bp_infracfg, pd->infracfg);
+ if (ret)
+ return ret;
+
+ return _scpsys_bus_protect_enable(pd->data->bp_smi, pd->smi);
+}
+
+static int _scpsys_bus_protect_disable(const struct scpsys_bus_prot_data *bpd,
+ struct regmap *regmap)
+{
+ int i, ret;
+
+ for (i = SPM_MAX_BUS_PROT_DATA - 1; i >= 0; i--) {
+ u32 val, mask = bpd[i].bus_prot_mask;
+
+ if (!mask)
+ continue;
+
+ if (bpd[i].bus_prot_reg_update)
+ regmap_clear_bits(regmap, bpd[i].bus_prot_clr, mask);
+ else
+ regmap_write(regmap, bpd[i].bus_prot_clr, mask);
+
+ if (bpd[i].ignore_clr_ack)
+ continue;
+
+ ret = regmap_read_poll_timeout(regmap, bpd[i].bus_prot_sta,
+ val, !(val & mask),
+ MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int scpsys_bus_protect_disable(struct scpsys_domain *pd)
+{
+ int ret;
+
+ ret = _scpsys_bus_protect_disable(pd->data->bp_smi, pd->smi);
+ if (ret)
+ return ret;
+
+ return _scpsys_bus_protect_disable(pd->data->bp_infracfg, pd->infracfg);
+}
+
+static int scpsys_power_on(struct generic_pm_domain *genpd)
+{
+ struct scpsys_domain *pd = container_of(genpd, struct scpsys_domain, genpd);
+ struct scpsys *scpsys = pd->scpsys;
+ bool tmp;
+ int ret;
+
+ ret = clk_bulk_enable(pd->num_clks, pd->clks);
+ if (ret)
+ return ret;
+
+ /* subsys power on */
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT);
+
+ /* wait until PWR_ACK = 1 */
+ ret = readx_poll_timeout(scpsys_domain_is_on, pd, tmp, tmp, MTK_POLL_DELAY_US,
+ MTK_POLL_TIMEOUT);
+ if (ret < 0)
+ goto err_pwr_ack;
+
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_CLK_DIS_BIT);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT);
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
+
+ ret = clk_bulk_enable(pd->num_subsys_clks, pd->subsys_clks);
+ if (ret)
+ goto err_pwr_ack;
+
+ ret = scpsys_sram_enable(pd);
+ if (ret < 0)
+ goto err_disable_subsys_clks;
+
+ ret = scpsys_bus_protect_disable(pd);
+ if (ret < 0)
+ goto err_disable_sram;
+
+ return 0;
+
+err_disable_sram:
+ scpsys_sram_disable(pd);
+err_disable_subsys_clks:
+ clk_bulk_disable(pd->num_subsys_clks, pd->subsys_clks);
+err_pwr_ack:
+ clk_bulk_disable(pd->num_clks, pd->clks);
+ return ret;
+}
+
+static int scpsys_power_off(struct generic_pm_domain *genpd)
+{
+ struct scpsys_domain *pd = container_of(genpd, struct scpsys_domain, genpd);
+ struct scpsys *scpsys = pd->scpsys;
+ bool tmp;
+ int ret;
+
+ ret = scpsys_bus_protect_enable(pd);
+ if (ret < 0)
+ return ret;
+
+ ret = scpsys_sram_disable(pd);
+ if (ret < 0)
+ return ret;
+
+ clk_bulk_disable(pd->num_subsys_clks, pd->subsys_clks);
+
+ /* subsys power off */
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT);
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_CLK_DIS_BIT);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
+
+ /* wait until PWR_ACK = 0 */
+ ret = readx_poll_timeout(scpsys_domain_is_on, pd, tmp, !tmp, MTK_POLL_DELAY_US,
+ MTK_POLL_TIMEOUT);
+ if (ret < 0)
+ return ret;
+
+ clk_bulk_disable(pd->num_clks, pd->clks);
+
+ return 0;
+}
+
+static struct
+generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_node *node)
+{
+ const struct scpsys_domain_data *domain_data;
+ struct scpsys_domain *pd;
+ struct property *prop;
+ const char *clk_name;
+ int i, ret, num_clks;
+ struct clk *clk;
+ int clk_ind = 0;
+ u32 id;
+
+ ret = of_property_read_u32(node, "reg", &id);
+ if (ret) {
+ dev_err(scpsys->dev, "%pOF: failed to retrieve domain id from reg: %d\n",
+ node, ret);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (id >= scpsys->soc_data->num_domains) {
+ dev_err(scpsys->dev, "%pOF: invalid domain id %d\n", node, id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ domain_data = &scpsys->soc_data->domains_data[id];
+ if (domain_data->sta_mask == 0) {
+ dev_err(scpsys->dev, "%pOF: undefined domain id %d\n", node, id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ pd = devm_kzalloc(scpsys->dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ pd->data = domain_data;
+ pd->scpsys = scpsys;
+
+ pd->infracfg = syscon_regmap_lookup_by_phandle_optional(node, "mediatek,infracfg");
+ if (IS_ERR(pd->infracfg))
+ return ERR_CAST(pd->infracfg);
+
+ pd->smi = syscon_regmap_lookup_by_phandle_optional(node, "mediatek,smi");
+ if (IS_ERR(pd->smi))
+ return ERR_CAST(pd->smi);
+
+ num_clks = of_clk_get_parent_count(node);
+ if (num_clks > 0) {
+ /* Calculate number of subsys_clks */
+ of_property_for_each_string(node, "clock-names", prop, clk_name) {
+ char *subsys;
+
+ subsys = strchr(clk_name, '-');
+ if (subsys)
+ pd->num_subsys_clks++;
+ else
+ pd->num_clks++;
+ }
+
+ pd->clks = devm_kcalloc(scpsys->dev, pd->num_clks, sizeof(*pd->clks), GFP_KERNEL);
+ if (!pd->clks)
+ return ERR_PTR(-ENOMEM);
+
+ pd->subsys_clks = devm_kcalloc(scpsys->dev, pd->num_subsys_clks,
+ sizeof(*pd->subsys_clks), GFP_KERNEL);
+ if (!pd->subsys_clks)
+ return ERR_PTR(-ENOMEM);
+
+ }
+
+ for (i = 0; i < pd->num_clks; i++) {
+ clk = of_clk_get(node, i);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err_probe(scpsys->dev, ret,
+ "%pOF: failed to get clk at index %d: %d\n", node, i, ret);
+ goto err_put_clocks;
+ }
+
+ pd->clks[clk_ind++].clk = clk;
+ }
+
+ for (i = 0; i < pd->num_subsys_clks; i++) {
+ clk = of_clk_get(node, i + clk_ind);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err_probe(scpsys->dev, ret,
+ "%pOF: failed to get clk at index %d: %d\n", node,
+ i + clk_ind, ret);
+ goto err_put_subsys_clocks;
+ }
+
+ pd->subsys_clks[i].clk = clk;
+ }
+
+ ret = clk_bulk_prepare(pd->num_clks, pd->clks);
+ if (ret)
+ goto err_put_subsys_clocks;
+
+ ret = clk_bulk_prepare(pd->num_subsys_clks, pd->subsys_clks);
+ if (ret)
+ goto err_unprepare_clocks;
+
+ /*
+ * Initially turn on all domains to make the domains usable
+ * with !CONFIG_PM and to get the hardware in sync with the
+ * software. The unused domains will be switched off during
+ * late_init time.
+ */
+ if (MTK_SCPD_CAPS(pd, MTK_SCPD_KEEP_DEFAULT_OFF)) {
+ if (scpsys_domain_is_on(pd))
+ dev_warn(scpsys->dev,
+ "%pOF: A default off power domain has been ON\n", node);
+ } else {
+ ret = scpsys_power_on(&pd->genpd);
+ if (ret < 0) {
+ dev_err(scpsys->dev, "%pOF: failed to power on domain: %d\n", node, ret);
+ goto err_unprepare_clocks;
+ }
+ }
+
+ if (scpsys->domains[id]) {
+ ret = -EINVAL;
+ dev_err(scpsys->dev,
+ "power domain with id %d already exists, check your device-tree\n", id);
+ goto err_unprepare_subsys_clocks;
+ }
+
+ pd->genpd.name = node->name;
+ pd->genpd.power_off = scpsys_power_off;
+ pd->genpd.power_on = scpsys_power_on;
+
+ if (MTK_SCPD_CAPS(pd, MTK_SCPD_KEEP_DEFAULT_OFF))
+ pm_genpd_init(&pd->genpd, NULL, true);
+ else
+ pm_genpd_init(&pd->genpd, NULL, false);
+
+ scpsys->domains[id] = &pd->genpd;
+
+ return scpsys->pd_data.domains[id];
+
+err_unprepare_subsys_clocks:
+ clk_bulk_unprepare(pd->num_subsys_clks, pd->subsys_clks);
+err_unprepare_clocks:
+ clk_bulk_unprepare(pd->num_clks, pd->clks);
+err_put_subsys_clocks:
+ clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
+err_put_clocks:
+ clk_bulk_put(pd->num_clks, pd->clks);
+ return ERR_PTR(ret);
+}
+
+static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *parent)
+{
+ struct generic_pm_domain *child_pd, *parent_pd;
+ struct device_node *child;
+ int ret;
+
+ for_each_child_of_node(parent, child) {
+ u32 id;
+
+ ret = of_property_read_u32(parent, "reg", &id);
+ if (ret) {
+ dev_err(scpsys->dev, "%pOF: failed to get parent domain id\n", child);
+ goto err_put_node;
+ }
+
+ if (!scpsys->pd_data.domains[id]) {
+ ret = -EINVAL;
+ dev_err(scpsys->dev, "power domain with id %d does not exist\n", id);
+ goto err_put_node;
+ }
+
+ parent_pd = scpsys->pd_data.domains[id];
+
+ child_pd = scpsys_add_one_domain(scpsys, child);
+ if (IS_ERR(child_pd)) {
+ ret = PTR_ERR(child_pd);
+ dev_err(scpsys->dev, "%pOF: failed to get child domain id\n", child);
+ goto err_put_node;
+ }
+
+ ret = pm_genpd_add_subdomain(parent_pd, child_pd);
+ if (ret) {
+ dev_err(scpsys->dev, "failed to add %s subdomain to parent %s\n",
+ child_pd->name, parent_pd->name);
+ goto err_put_node;
+ } else {
+ dev_dbg(scpsys->dev, "%s add subdomain: %s\n", parent_pd->name,
+ child_pd->name);
+ }
+
+ /* recursive call to add all subdomains */
+ ret = scpsys_add_subdomain(scpsys, child);
+ if (ret)
+ goto err_put_node;
+ }
+
+ return 0;
+
+err_put_node:
+ of_node_put(child);
+ return ret;
+}
+
+static void scpsys_remove_one_domain(struct scpsys_domain *pd)
+{
+ int ret;
+
+ if (scpsys_domain_is_on(pd))
+ scpsys_power_off(&pd->genpd);
+
+ /*
+ * We are already in the error cleanup path, so only log the failure
+ * here instead of returning another error on top of the original one.
+ */
+ ret = pm_genpd_remove(&pd->genpd);
+ if (ret < 0)
+ dev_err(pd->scpsys->dev,
+ "failed to remove domain '%s' : %d - state may be inconsistent\n",
+ pd->genpd.name, ret);
+
+ clk_bulk_unprepare(pd->num_clks, pd->clks);
+ clk_bulk_put(pd->num_clks, pd->clks);
+
+ clk_bulk_unprepare(pd->num_subsys_clks, pd->subsys_clks);
+ clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
+}
+
+static void scpsys_domain_cleanup(struct scpsys *scpsys)
+{
+ struct generic_pm_domain *genpd;
+ struct scpsys_domain *pd;
+ int i;
+
+ for (i = scpsys->pd_data.num_domains - 1; i >= 0; i--) {
+ genpd = scpsys->pd_data.domains[i];
+ if (genpd) {
+ pd = to_scpsys_domain(genpd);
+ scpsys_remove_one_domain(pd);
+ }
+ }
+}
+
+static const struct of_device_id scpsys_of_match[] = {
+ {
+ .compatible = "mediatek,mt8173-power-controller",
+ .data = &mt8173_scpsys_data,
+ },
+ {
+ .compatible = "mediatek,mt8183-power-controller",
+ .data = &mt8183_scpsys_data,
+ },
+ {
+ .compatible = "mediatek,mt8192-power-controller",
+ .data = &mt8192_scpsys_data,
+ },
+ { }
+};
+
+static int scpsys_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct scpsys_soc_data *soc;
+ struct device_node *node;
+ struct device *parent;
+ struct scpsys *scpsys;
+ int ret;
+
+ soc = of_device_get_match_data(&pdev->dev);
+ if (!soc) {
+ dev_err(&pdev->dev, "no power controller data\n");
+ return -EINVAL;
+ }
+
+ scpsys = devm_kzalloc(dev, struct_size(scpsys, domains, soc->num_domains), GFP_KERNEL);
+ if (!scpsys)
+ return -ENOMEM;
+
+ scpsys->dev = dev;
+ scpsys->soc_data = soc;
+
+ scpsys->pd_data.domains = scpsys->domains;
+ scpsys->pd_data.num_domains = soc->num_domains;
+
+ parent = dev->parent;
+ if (!parent) {
+ dev_err(dev, "no parent for syscon devices\n");
+ return -ENODEV;
+ }
+
+ scpsys->base = syscon_node_to_regmap(parent->of_node);
+ if (IS_ERR(scpsys->base)) {
+ dev_err(dev, "no regmap available\n");
+ return PTR_ERR(scpsys->base);
+ }
+
+ ret = -ENODEV;
+ for_each_available_child_of_node(np, node) {
+ struct generic_pm_domain *domain;
+
+ domain = scpsys_add_one_domain(scpsys, node);
+ if (IS_ERR(domain)) {
+ ret = PTR_ERR(domain);
+ of_node_put(node);
+ goto err_cleanup_domains;
+ }
+
+ ret = scpsys_add_subdomain(scpsys, node);
+ if (ret) {
+ of_node_put(node);
+ goto err_cleanup_domains;
+ }
+ }
+
+ if (ret) {
+ dev_dbg(dev, "no power domains present\n");
+ return ret;
+ }
+
+ ret = of_genpd_add_provider_onecell(np, &scpsys->pd_data);
+ if (ret) {
+ dev_err(dev, "failed to add provider: %d\n", ret);
+ goto err_cleanup_domains;
+ }
+
+ return 0;
+
+err_cleanup_domains:
+ scpsys_domain_cleanup(scpsys);
+ return ret;
+}
+
+static struct platform_driver scpsys_pm_domain_driver = {
+ .probe = scpsys_probe,
+ .driver = {
+ .name = "mtk-power-controller",
+ .suppress_bind_attrs = true,
+ .of_match_table = scpsys_of_match,
+ },
+};
+builtin_platform_driver(scpsys_pm_domain_driver);
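A minimal sketch of the clock-name classification used by scpsys_add_one_domain() above, assuming hypothetical "clock-names" entries such as "mm", "mm-0" and "mm-1": names containing a '-' are counted as subsystem clocks, all others as plain domain clocks.

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* hypothetical "clock-names" property of one power-domain node */
	const char *clock_names[] = { "mm", "mm-0", "mm-1" };
	int num_clks = 0, num_subsys_clks = 0;
	size_t i;

	for (i = 0; i < sizeof(clock_names) / sizeof(clock_names[0]); i++) {
		if (strchr(clock_names[i], '-'))
			num_subsys_clks++;	/* e.g. "mm-0", "mm-1" */
		else
			num_clks++;		/* e.g. "mm" */
	}

	/* prints "domain clks: 1, subsys clks: 2" */
	printf("domain clks: %d, subsys clks: %d\n", num_clks, num_subsys_clks);
	return 0;
}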
diff --git a/drivers/soc/mediatek/mtk-pm-domains.h b/drivers/soc/mediatek/mtk-pm-domains.h
new file mode 100644
index 000000000000..a2f4d8f97e05
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-pm-domains.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MTK_PM_DOMAINS_H
+#define __SOC_MEDIATEK_MTK_PM_DOMAINS_H
+
+#define MTK_SCPD_ACTIVE_WAKEUP BIT(0)
+#define MTK_SCPD_FWAIT_SRAM BIT(1)
+#define MTK_SCPD_SRAM_ISO BIT(2)
+#define MTK_SCPD_KEEP_DEFAULT_OFF BIT(3)
+#define MTK_SCPD_CAPS(_scpd, _x) ((_scpd)->data->caps & (_x))
+
+#define SPM_VDE_PWR_CON 0x0210
+#define SPM_MFG_PWR_CON 0x0214
+#define SPM_VEN_PWR_CON 0x0230
+#define SPM_ISP_PWR_CON 0x0238
+#define SPM_DIS_PWR_CON 0x023c
+#define SPM_VEN2_PWR_CON 0x0298
+#define SPM_AUDIO_PWR_CON 0x029c
+#define SPM_MFG_2D_PWR_CON 0x02c0
+#define SPM_MFG_ASYNC_PWR_CON 0x02c4
+#define SPM_USB_PWR_CON 0x02cc
+
+#define SPM_PWR_STATUS 0x060c
+#define SPM_PWR_STATUS_2ND 0x0610
+
+#define PWR_STATUS_CONN BIT(1)
+#define PWR_STATUS_DISP BIT(3)
+#define PWR_STATUS_MFG BIT(4)
+#define PWR_STATUS_ISP BIT(5)
+#define PWR_STATUS_VDEC BIT(7)
+#define PWR_STATUS_VENC_LT BIT(20)
+#define PWR_STATUS_VENC BIT(21)
+#define PWR_STATUS_MFG_2D BIT(22)
+#define PWR_STATUS_MFG_ASYNC BIT(23)
+#define PWR_STATUS_AUDIO BIT(24)
+#define PWR_STATUS_USB BIT(25)
+
+#define SPM_MAX_BUS_PROT_DATA 5
+
+#define _BUS_PROT(_mask, _set, _clr, _sta, _update, _ignore) { \
+ .bus_prot_mask = (_mask), \
+ .bus_prot_set = _set, \
+ .bus_prot_clr = _clr, \
+ .bus_prot_sta = _sta, \
+ .bus_prot_reg_update = _update, \
+ .ignore_clr_ack = _ignore, \
+ }
+
+#define BUS_PROT_WR(_mask, _set, _clr, _sta) \
+ _BUS_PROT(_mask, _set, _clr, _sta, false, false)
+
+#define BUS_PROT_WR_IGN(_mask, _set, _clr, _sta) \
+ _BUS_PROT(_mask, _set, _clr, _sta, false, true)
+
+#define BUS_PROT_UPDATE(_mask, _set, _clr, _sta) \
+ _BUS_PROT(_mask, _set, _clr, _sta, true, false)
+
+#define BUS_PROT_UPDATE_TOPAXI(_mask) \
+ BUS_PROT_UPDATE(_mask, \
+ INFRA_TOPAXI_PROTECTEN, \
+ INFRA_TOPAXI_PROTECTEN_CLR, \
+ INFRA_TOPAXI_PROTECTSTA1)
+
+struct scpsys_bus_prot_data {
+ u32 bus_prot_mask;
+ u32 bus_prot_set;
+ u32 bus_prot_clr;
+ u32 bus_prot_sta;
+ bool bus_prot_reg_update;
+ bool ignore_clr_ack;
+};
+
+#define MAX_SUBSYS_CLKS 10
+
+/**
+ * struct scpsys_domain_data - scp domain data for power on/off flow
+ * @sta_mask: The mask for power on/off status bit.
+ * @ctl_offs: The offset for main power control register.
+ * @sram_pdn_bits: The mask for sram power control bits.
+ * @sram_pdn_ack_bits: The mask for sram power control acknowledge bits.
+ * @caps: The MTK_SCPD_* capability flags of the power domain.
+ * @bp_infracfg: bus protection for infracfg subsystem
+ * @bp_smi: bus protection for smi subsystem
+ */
+struct scpsys_domain_data {
+ u32 sta_mask;
+ int ctl_offs;
+ u32 sram_pdn_bits;
+ u32 sram_pdn_ack_bits;
+ u8 caps;
+ const struct scpsys_bus_prot_data bp_infracfg[SPM_MAX_BUS_PROT_DATA];
+ const struct scpsys_bus_prot_data bp_smi[SPM_MAX_BUS_PROT_DATA];
+};
+
+struct scpsys_soc_data {
+ const struct scpsys_domain_data *domains_data;
+ int num_domains;
+ int pwr_sta_offs;
+ int pwr_sta2nd_offs;
+};
+
+#endif /* __SOC_MEDIATEK_MTK_PM_DOMAINS_H */
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index f669d3754627..ca75b14931ec 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -524,6 +524,7 @@ static void mtk_register_power_domains(struct platform_device *pdev,
for (i = 0; i < num; i++) {
struct scp_domain *scpd = &scp->domains[i];
struct generic_pm_domain *genpd = &scpd->genpd;
+ bool on;
/*
* Initially turn on all domains to make the domains usable
@@ -531,9 +532,9 @@ static void mtk_register_power_domains(struct platform_device *pdev,
* software. The unused domains will be switched off during
* late_init time.
*/
- genpd->power_on(genpd);
+ on = !WARN_ON(genpd->power_on(genpd) < 0);
- pm_genpd_init(genpd, NULL, false);
+ pm_genpd_init(genpd, NULL, !on);
}
/*
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 6a3b69b43ad5..79b568f82a1c 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -17,7 +17,7 @@ config QCOM_AOSS_QMP
Subsystem (AOSS) using Qualcomm Messaging Protocol (QMP).
config QCOM_COMMAND_DB
- bool "Qualcomm Command DB"
+ tristate "Qualcomm Command DB"
depends on ARCH_QCOM || COMPILE_TEST
depends on OF_RESERVED_MEM
help
@@ -108,8 +108,9 @@ config QCOM_RMTFS_MEM
Say y here if you intend to boot the modem remoteproc.
config QCOM_RPMH
- bool "Qualcomm RPM-Hardened (RPMH) Communication"
+ tristate "Qualcomm RPM-Hardened (RPMH) Communication"
depends on ARCH_QCOM || COMPILE_TEST
+ depends on (QCOM_COMMAND_DB || !QCOM_COMMAND_DB)
help
Support for communication with the hardened-RPM blocks in
Qualcomm Technologies Inc (QTI) SoCs. RPMH communication uses an
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index fc5610603b17..dd872017f345 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -1,8 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved. */
#include <linux/debugfs.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
@@ -340,12 +341,14 @@ static const struct of_device_id cmd_db_match_table[] = {
{ .compatible = "qcom,cmd-db" },
{ }
};
+MODULE_DEVICE_TABLE(of, cmd_db_match_table);
static struct platform_driver cmd_db_dev_driver = {
.probe = cmd_db_dev_probe,
.driver = {
.name = "cmd-db",
.of_match_table = cmd_db_match_table,
+ .suppress_bind_attrs = true,
},
};
@@ -354,3 +357,6 @@ static int __init cmd_db_device_init(void)
return platform_driver_register(&cmd_db_dev_driver);
}
arch_initcall(cmd_db_device_init);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Command DB Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/kryo-l2-accessors.c b/drivers/soc/qcom/kryo-l2-accessors.c
index c20cb92077c0..7886af4fd726 100644
--- a/drivers/soc/qcom/kryo-l2-accessors.c
+++ b/drivers/soc/qcom/kryo-l2-accessors.c
@@ -16,7 +16,7 @@ static DEFINE_RAW_SPINLOCK(l2_access_lock);
/**
* kryo_l2_set_indirect_reg() - write value to an L2 register
* @reg: Address of L2 register.
- * @value: Value to be written to register.
+ * @val: Value to be written to register.
*
* Use architecturally required barriers for ordering between system register
* accesses, and system registers with respect to device memory
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 70fbe70c6213..16b421608e9c 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -45,10 +45,13 @@
#define LLCC_TRP_ATTR0_CFGn(n) (0x21000 + SZ_8 * n)
#define LLCC_TRP_ATTR1_CFGn(n) (0x21004 + SZ_8 * n)
+#define LLCC_TRP_SCID_DIS_CAP_ALLOC 0x21f00
+#define LLCC_TRP_PCB_ACT 0x21f04
+
#define BANK_OFFSET_STRIDE 0x80000
/**
- * llcc_slice_config - Data associated with the llcc slice
+ * struct llcc_slice_config - Data associated with the llcc slice
* @usecase_id: Unique id for the client's use case
* @slice_id: llcc slice id for each client
* @max_cap: The maximum capacity of the cache slice provided in KB
@@ -89,6 +92,7 @@ struct llcc_slice_config {
struct qcom_llcc_config {
const struct llcc_slice_config *sct_data;
int size;
+ bool need_llcc_cfg;
};
static const struct llcc_slice_config sc7180_data[] = {
@@ -119,14 +123,45 @@ static const struct llcc_slice_config sdm845_data[] = {
{ LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0 },
};
+static const struct llcc_slice_config sm8150_data[] = {
+ { LLCC_CPUSS, 1, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 1 },
+ { LLCC_VIDSC0, 2, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_VIDSC1, 3, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_AUDIO, 6, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMHPGRW, 7, 3072, 1, 0, 0xFF, 0xF00, 0, 0, 0, 1, 0 },
+ { LLCC_MDM, 8, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MODHW, 9, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_CMPT, 10, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPUHTW, 11, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPU, 12, 2560, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MMUHWT, 13, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1 },
+ { LLCC_CMPTDMA, 15, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_DISP, 16, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMHPFX, 20, 1024, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMHPFX, 21, 1024, 0, 1, 0xF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_AUDHW, 22, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_NPU, 23, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_WLHW, 24, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MODPE, 29, 256, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_APTCM, 30, 256, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0 },
+ { LLCC_WRCACHE, 31, 128, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0 },
+};
+
static const struct qcom_llcc_config sc7180_cfg = {
.sct_data = sc7180_data,
.size = ARRAY_SIZE(sc7180_data),
+ .need_llcc_cfg = true,
};
static const struct qcom_llcc_config sdm845_cfg = {
.sct_data = sdm845_data,
.size = ARRAY_SIZE(sdm845_data),
+ .need_llcc_cfg = false,
+};
+
+static const struct qcom_llcc_config sm8150_cfg = {
+ .sct_data = sm8150_data,
+ .size = ARRAY_SIZE(sm8150_data),
};
static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
@@ -318,62 +353,91 @@ size_t llcc_get_slice_size(struct llcc_slice_desc *desc)
}
EXPORT_SYMBOL_GPL(llcc_get_slice_size);
-static int qcom_llcc_cfg_program(struct platform_device *pdev)
+static int _qcom_llcc_cfg_program(const struct llcc_slice_config *config,
+ const struct qcom_llcc_config *cfg)
{
- int i;
+ int ret;
u32 attr1_cfg;
u32 attr0_cfg;
u32 attr1_val;
u32 attr0_val;
u32 max_cap_cacheline;
+ struct llcc_slice_desc desc;
+
+ attr1_val = config->cache_mode;
+ attr1_val |= config->probe_target_ways << ATTR1_PROBE_TARGET_WAYS_SHIFT;
+ attr1_val |= config->fixed_size << ATTR1_FIXED_SIZE_SHIFT;
+ attr1_val |= config->priority << ATTR1_PRIORITY_SHIFT;
+
+ max_cap_cacheline = MAX_CAP_TO_BYTES(config->max_cap);
+
+ /*
+ * LLCC instances can vary for each target.
+ * The SW writes to the broadcast register, which gets propagated
+ * to each LLCC instance (llcc0..llccN).
+ * Since the size of the memory is divided equally amongst the
+ * llcc instances, we need to configure the max cap accordingly.
+ */
+ max_cap_cacheline = max_cap_cacheline / drv_data->num_banks;
+ max_cap_cacheline >>= CACHE_LINE_SIZE_SHIFT;
+ attr1_val |= max_cap_cacheline << ATTR1_MAX_CAP_SHIFT;
+
+ attr1_cfg = LLCC_TRP_ATTR1_CFGn(config->slice_id);
+
+ ret = regmap_write(drv_data->bcast_regmap, attr1_cfg, attr1_val);
+ if (ret)
+ return ret;
+
+ attr0_val = config->res_ways & ATTR0_RES_WAYS_MASK;
+ attr0_val |= config->bonus_ways << ATTR0_BONUS_WAYS_SHIFT;
+
+ attr0_cfg = LLCC_TRP_ATTR0_CFGn(config->slice_id);
+
+ ret = regmap_write(drv_data->bcast_regmap, attr0_cfg, attr0_val);
+ if (ret)
+ return ret;
+
+ if (cfg->need_llcc_cfg) {
+ u32 disable_cap_alloc, retain_pc;
+
+ disable_cap_alloc = config->dis_cap_alloc << config->slice_id;
+ ret = regmap_write(drv_data->bcast_regmap,
+ LLCC_TRP_SCID_DIS_CAP_ALLOC, disable_cap_alloc);
+ if (ret)
+ return ret;
+
+ retain_pc = config->retain_on_pc << config->slice_id;
+ ret = regmap_write(drv_data->bcast_regmap,
+ LLCC_TRP_PCB_ACT, retain_pc);
+ if (ret)
+ return ret;
+ }
+
+ if (config->activate_on_init) {
+ desc.slice_id = config->slice_id;
+ ret = llcc_slice_activate(&desc);
+ }
+
+ return ret;
+}
+
+static int qcom_llcc_cfg_program(struct platform_device *pdev,
+ const struct qcom_llcc_config *cfg)
+{
+ int i;
u32 sz;
int ret = 0;
const struct llcc_slice_config *llcc_table;
- struct llcc_slice_desc desc;
sz = drv_data->cfg_size;
llcc_table = drv_data->cfg;
for (i = 0; i < sz; i++) {
- attr1_cfg = LLCC_TRP_ATTR1_CFGn(llcc_table[i].slice_id);
- attr0_cfg = LLCC_TRP_ATTR0_CFGn(llcc_table[i].slice_id);
-
- attr1_val = llcc_table[i].cache_mode;
- attr1_val |= llcc_table[i].probe_target_ways <<
- ATTR1_PROBE_TARGET_WAYS_SHIFT;
- attr1_val |= llcc_table[i].fixed_size <<
- ATTR1_FIXED_SIZE_SHIFT;
- attr1_val |= llcc_table[i].priority <<
- ATTR1_PRIORITY_SHIFT;
-
- max_cap_cacheline = MAX_CAP_TO_BYTES(llcc_table[i].max_cap);
-
- /* LLCC instances can vary for each target.
- * The SW writes to broadcast register which gets propagated
- * to each llcc instace (llcc0,.. llccN).
- * Since the size of the memory is divided equally amongst the
- * llcc instances, we need to configure the max cap accordingly.
- */
- max_cap_cacheline = max_cap_cacheline / drv_data->num_banks;
- max_cap_cacheline >>= CACHE_LINE_SIZE_SHIFT;
- attr1_val |= max_cap_cacheline << ATTR1_MAX_CAP_SHIFT;
-
- attr0_val = llcc_table[i].res_ways & ATTR0_RES_WAYS_MASK;
- attr0_val |= llcc_table[i].bonus_ways << ATTR0_BONUS_WAYS_SHIFT;
-
- ret = regmap_write(drv_data->bcast_regmap, attr1_cfg,
- attr1_val);
+ ret = _qcom_llcc_cfg_program(&llcc_table[i], cfg);
if (ret)
return ret;
- ret = regmap_write(drv_data->bcast_regmap, attr0_cfg,
- attr0_val);
- if (ret)
- return ret;
- if (llcc_table[i].activate_on_init) {
- desc.slice_id = llcc_table[i].slice_id;
- ret = llcc_slice_activate(&desc);
- }
}
+
return ret;
}
@@ -472,7 +536,7 @@ static int qcom_llcc_probe(struct platform_device *pdev)
mutex_init(&drv_data->lock);
platform_set_drvdata(pdev, drv_data);
- ret = qcom_llcc_cfg_program(pdev);
+ ret = qcom_llcc_cfg_program(pdev, cfg);
if (ret)
goto err;
@@ -494,6 +558,7 @@ err:
static const struct of_device_id qcom_llcc_of_match[] = {
{ .compatible = "qcom,sc7180-llcc", .data = &sc7180_cfg },
{ .compatible = "qcom,sdm845-llcc", .data = &sdm845_cfg },
+ { .compatible = "qcom,sm8150-llcc", .data = &sm8150_cfg },
{ }
};
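A worked example of the per-bank max-cap computation performed in _qcom_llcc_cfg_program() above. The slice size, bank count and the MAX_CAP_TO_BYTES/CACHE_LINE_SIZE_SHIFT stand-ins below are illustrative assumptions, not the driver's actual definitions.

#include <stdio.h>

#define MAX_CAP_TO_BYTES(n)	((n) * 1024u)	/* assumed: KB -> bytes */
#define CACHE_LINE_SIZE_SHIFT	6		/* assumed: 64-byte cache lines */

int main(void)
{
	unsigned int max_cap = 3072;	/* hypothetical slice capacity in KB */
	unsigned int num_banks = 4;	/* hypothetical number of LLCC instances */
	unsigned int max_cap_cacheline;

	max_cap_cacheline = MAX_CAP_TO_BYTES(max_cap);
	max_cap_cacheline /= num_banks;			/* equal share per bank */
	max_cap_cacheline >>= CACHE_LINE_SIZE_SHIFT;	/* bytes -> cache lines */

	/* 3072 KB / 4 banks / 64 B per line = 12288 cache lines */
	printf("max_cap_cacheline = %u\n", max_cap_cacheline);
	return 0;
}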
diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
index 088dc99f77f3..209dcdca923f 100644
--- a/drivers/soc/qcom/pdr_interface.c
+++ b/drivers/soc/qcom/pdr_interface.c
@@ -110,7 +110,7 @@ static void pdr_locator_del_server(struct qmi_handle *qmi,
pdr->locator_addr.sq_port = 0;
}
-static struct qmi_ops pdr_locator_ops = {
+static const struct qmi_ops pdr_locator_ops = {
.new_server = pdr_locator_new_server,
.del_server = pdr_locator_del_server,
};
@@ -238,7 +238,7 @@ static void pdr_notifier_del_server(struct qmi_handle *qmi,
mutex_unlock(&pdr->list_lock);
}
-static struct qmi_ops pdr_notifier_ops = {
+static const struct qmi_ops pdr_notifier_ops = {
.new_server = pdr_notifier_new_server,
.del_server = pdr_notifier_del_server,
};
@@ -343,7 +343,7 @@ static void pdr_indication_cb(struct qmi_handle *qmi,
queue_work(pdr->indack_wq, &pdr->indack_work);
}
-static struct qmi_msg_handler qmi_indication_handler[] = {
+static const struct qmi_msg_handler qmi_indication_handler[] = {
{
.type = QMI_INDICATION,
.msg_id = SERVREG_STATE_UPDATED_IND_ID,
@@ -569,7 +569,7 @@ EXPORT_SYMBOL(pdr_add_lookup);
int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds)
{
struct servreg_restart_pd_resp resp;
- struct servreg_restart_pd_req req;
+ struct servreg_restart_pd_req req = { 0 };
struct sockaddr_qrtr addr;
struct pdr_service *tmp;
struct qmi_txn txn;
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index 7649b2057b9a..f42954e2c98e 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -82,10 +82,11 @@
#define NUM_AHB_CLKS 2
/**
- * @struct geni_wrapper - Data structure to represent the QUP Wrapper Core
+ * struct geni_wrapper - Data structure to represent the QUP Wrapper Core
* @dev: Device pointer of the QUP wrapper core
* @base: Base address of this instance of QUP wrapper core
* @ahb_clks: Handle to the primary & secondary AHB clocks
+ * @to_core: Core ICC path
*/
struct geni_wrapper {
struct device *dev;
@@ -237,7 +238,7 @@ static void geni_se_irq_clear(struct geni_se *se)
* geni_se_init() - Initialize the GENI serial engine
* @se: Pointer to the concerned serial engine.
* @rx_wm: Receive watermark, in units of FIFO words.
- * @rx_rfr_wm: Ready-for-receive watermark, in units of FIFO words.
+ * @rx_rfr: Ready-for-receive watermark, in units of FIFO words.
*
* This function is used to initialize the GENI serial engine, configure
* receive watermark and ready-for-receive watermarks.
@@ -732,7 +733,7 @@ void geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len)
{
struct geni_wrapper *wrapper = se->wrapper;
- if (iova && !dma_mapping_error(wrapper->dev, iova))
+ if (!dma_mapping_error(wrapper->dev, iova))
dma_unmap_single(wrapper->dev, iova, len, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(geni_se_tx_dma_unprep);
@@ -749,7 +750,7 @@ void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len)
{
struct geni_wrapper *wrapper = se->wrapper;
- if (iova && !dma_mapping_error(wrapper->dev, iova))
+ if (!dma_mapping_error(wrapper->dev, iova))
dma_unmap_single(wrapper->dev, iova, len, DMA_FROM_DEVICE);
}
EXPORT_SYMBOL(geni_se_rx_dma_unprep);
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index ed2c687c16b3..b5840d624bc6 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -65,6 +65,7 @@ struct qmp_cooling_device {
* @tx_lock: provides synchronization between multiple callers of qmp_send()
* @qdss_clk: QDSS clock hw struct
* @pd_data: genpd data
+ * @cooling_devs: thermal cooling devices
*/
struct qmp {
void __iomem *msgram;
@@ -225,7 +226,6 @@ static bool qmp_message_empty(struct qmp *qmp)
static int qmp_send(struct qmp *qmp, const void *data, size_t len)
{
long time_left;
- size_t tlen;
int ret;
if (WARN_ON(len + sizeof(u32) > qmp->size))
@@ -242,7 +242,7 @@ static int qmp_send(struct qmp *qmp, const void *data, size_t len)
writel(len, qmp->msgram + qmp->offset);
/* Read back len to confirm data written in message RAM */
- tlen = readl(qmp->msgram + qmp->offset);
+ readl(qmp->msgram + qmp->offset);
qmp_kick(qmp);
time_left = wait_event_interruptible_timeout(qmp->event,
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index a297911afe57..37969dcbaf14 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -13,6 +13,7 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
@@ -497,7 +498,7 @@ static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
- trace_rpmh_send_msg_rcuidle(drv, tcs_id, j, msgid, cmd);
+ trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
}
write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
@@ -1018,6 +1019,7 @@ static const struct of_device_id rpmh_drv_match[] = {
{ .compatible = "qcom,rpmh-rsc", },
{ }
};
+MODULE_DEVICE_TABLE(of, rpmh_drv_match);
static struct platform_driver rpmh_driver = {
.probe = rpmh_rsc_probe,
@@ -1033,3 +1035,6 @@ static int __init rpmh_driver_init(void)
return platform_driver_register(&rpmh_driver);
}
arch_initcall(rpmh_driver_init);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index b61e183ede69..01765ee9cdfb 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -181,8 +181,6 @@ static int __rpmh_write(const struct device *dev, enum rpmh_state state,
struct cache_req *req;
int i;
- rpm_msg->msg.state = state;
-
/* Cache the request in our store and link the payload */
for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
@@ -190,8 +188,6 @@ static int __rpmh_write(const struct device *dev, enum rpmh_state state,
return PTR_ERR(req);
}
- rpm_msg->msg.state = state;
-
if (state == RPMH_ACTIVE_ONLY_STATE) {
WARN_ON(irqs_disabled());
ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
@@ -254,7 +250,7 @@ EXPORT_SYMBOL(rpmh_write_async);
/**
* rpmh_write: Write a set of RPMH commands and block until response
*
- * @rc: The RPMH handle got from rpmh_get_client
+ * @dev: The device making the request
* @state: Active/sleep set
* @cmd: The payload data
* @n: The number of elements in @cmd
@@ -268,11 +264,9 @@ int rpmh_write(const struct device *dev, enum rpmh_state state,
DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
int ret;
- if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
- return -EINVAL;
-
- memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
- rpm_msg.msg.num_cmds = n;
+ ret = __fill_rpmh_msg(&rpm_msg, state, cmd, n);
+ if (ret)
+ return ret;
ret = __rpmh_write(dev, state, &rpm_msg);
if (ret)
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
index e72426221a69..7ce06356d24c 100644
--- a/drivers/soc/qcom/rpmhpd.c
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -24,9 +24,12 @@
* struct rpmhpd - top level RPMh power domain resource data structure
* @dev: rpmh power domain controller device
* @pd: generic_pm_domain corrresponding to the power domain
+ * @parent: generic_pm_domain corresponding to the parent's power domain
* @peer: A peer power domain in case Active only Voting is
* supported
* @active_only: True if it represents an Active only peer
+ * @corner: current corner
+ * @active_corner: current active corner
* @level: An array of level (vlvl) to corner (hlvl) mappings
* derived from cmd-db
* @level_count: Number of levels supported by the power domain. max
@@ -132,6 +135,18 @@ static const struct rpmhpd_desc sdm845_desc = {
.num_pds = ARRAY_SIZE(sdm845_rpmhpds),
};
+/* SDX55 RPMH powerdomains */
+static struct rpmhpd *sdx55_rpmhpds[] = {
+ [SDX55_MSS] = &sdm845_mss,
+ [SDX55_MX] = &sdm845_mx,
+ [SDX55_CX] = &sdm845_cx,
+};
+
+static const struct rpmhpd_desc sdx55_desc = {
+ .rpmhpds = sdx55_rpmhpds,
+ .num_pds = ARRAY_SIZE(sdx55_rpmhpds),
+};
+
/* SM8150 RPMH powerdomains */
static struct rpmhpd sm8150_mmcx_ao;
@@ -205,6 +220,7 @@ static const struct rpmhpd_desc sc7180_desc = {
static const struct of_device_id rpmhpd_match_table[] = {
{ .compatible = "qcom,sc7180-rpmhpd", .data = &sc7180_desc },
{ .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
+ { .compatible = "qcom,sdx55-rpmhpd", .data = &sdx55_desc},
{ .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc },
{ .compatible = "qcom,sm8250-rpmhpd", .data = &sm8250_desc },
{ }
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
index f2168e4259b2..85d1207b72d7 100644
--- a/drivers/soc/qcom/rpmpd.c
+++ b/drivers/soc/qcom/rpmpd.c
@@ -35,7 +35,7 @@
#define KEY_FLOOR_LEVEL 0x6c6676 /* vfl */
#define KEY_LEVEL 0x6c766c76 /* vlvl */
-#define MAX_8996_RPMPD_STATE 6
+#define MAX_CORNER_RPMPD_STATE 6
#define DEFINE_RPMPD_PAIR(_platform, _name, _active, r_type, r_key, \
r_id) \
@@ -116,6 +116,52 @@ struct rpmpd_desc {
static DEFINE_MUTEX(rpmpd_lock);
+/* msm8939 RPM Power Domains */
+DEFINE_RPMPD_PAIR(msm8939, vddmd, vddmd_ao, SMPA, CORNER, 1);
+DEFINE_RPMPD_VFC(msm8939, vddmd_vfc, SMPA, 1);
+
+DEFINE_RPMPD_PAIR(msm8939, vddcx, vddcx_ao, SMPA, CORNER, 2);
+DEFINE_RPMPD_VFC(msm8939, vddcx_vfc, SMPA, 2);
+
+DEFINE_RPMPD_PAIR(msm8939, vddmx, vddmx_ao, LDOA, CORNER, 3);
+
+static struct rpmpd *msm8939_rpmpds[] = {
+ [MSM8939_VDDMDCX] = &msm8939_vddmd,
+ [MSM8939_VDDMDCX_AO] = &msm8939_vddmd_ao,
+ [MSM8939_VDDMDCX_VFC] = &msm8939_vddmd_vfc,
+ [MSM8939_VDDCX] = &msm8939_vddcx,
+ [MSM8939_VDDCX_AO] = &msm8939_vddcx_ao,
+ [MSM8939_VDDCX_VFC] = &msm8939_vddcx_vfc,
+ [MSM8939_VDDMX] = &msm8939_vddmx,
+ [MSM8939_VDDMX_AO] = &msm8939_vddmx_ao,
+};
+
+static const struct rpmpd_desc msm8939_desc = {
+ .rpmpds = msm8939_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8939_rpmpds),
+ .max_state = MAX_CORNER_RPMPD_STATE,
+};
+
+/* msm8916 RPM Power Domains */
+DEFINE_RPMPD_PAIR(msm8916, vddcx, vddcx_ao, SMPA, CORNER, 1);
+DEFINE_RPMPD_PAIR(msm8916, vddmx, vddmx_ao, LDOA, CORNER, 3);
+
+DEFINE_RPMPD_VFC(msm8916, vddcx_vfc, SMPA, 1);
+
+static struct rpmpd *msm8916_rpmpds[] = {
+ [MSM8916_VDDCX] = &msm8916_vddcx,
+ [MSM8916_VDDCX_AO] = &msm8916_vddcx_ao,
+ [MSM8916_VDDCX_VFC] = &msm8916_vddcx_vfc,
+ [MSM8916_VDDMX] = &msm8916_vddmx,
+ [MSM8916_VDDMX_AO] = &msm8916_vddmx_ao,
+};
+
+static const struct rpmpd_desc msm8916_desc = {
+ .rpmpds = msm8916_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8916_rpmpds),
+ .max_state = MAX_CORNER_RPMPD_STATE,
+};
+
/* msm8976 RPM Power Domains */
DEFINE_RPMPD_PAIR(msm8976, vddcx, vddcx_ao, SMPA, LEVEL, 2);
DEFINE_RPMPD_PAIR(msm8976, vddmx, vddmx_ao, SMPA, LEVEL, 6);
@@ -159,7 +205,7 @@ static struct rpmpd *msm8996_rpmpds[] = {
static const struct rpmpd_desc msm8996_desc = {
.rpmpds = msm8996_rpmpds,
.num_pds = ARRAY_SIZE(msm8996_rpmpds),
- .max_state = MAX_8996_RPMPD_STATE,
+ .max_state = MAX_CORNER_RPMPD_STATE,
};
/* msm8998 RPM Power domains */
@@ -220,11 +266,46 @@ static const struct rpmpd_desc qcs404_desc = {
.max_state = RPM_SMD_LEVEL_BINNING,
};
+/* sdm660 RPM Power domains */
+DEFINE_RPMPD_PAIR(sdm660, vddcx, vddcx_ao, RWCX, LEVEL, 0);
+DEFINE_RPMPD_VFL(sdm660, vddcx_vfl, RWCX, 0);
+
+DEFINE_RPMPD_PAIR(sdm660, vddmx, vddmx_ao, RWMX, LEVEL, 0);
+DEFINE_RPMPD_VFL(sdm660, vddmx_vfl, RWMX, 0);
+
+DEFINE_RPMPD_LEVEL(sdm660, vdd_ssccx, RWLC, 0);
+DEFINE_RPMPD_VFL(sdm660, vdd_ssccx_vfl, RWLC, 0);
+
+DEFINE_RPMPD_LEVEL(sdm660, vdd_sscmx, RWLM, 0);
+DEFINE_RPMPD_VFL(sdm660, vdd_sscmx_vfl, RWLM, 0);
+
+static struct rpmpd *sdm660_rpmpds[] = {
+ [SDM660_VDDCX] = &sdm660_vddcx,
+ [SDM660_VDDCX_AO] = &sdm660_vddcx_ao,
+ [SDM660_VDDCX_VFL] = &sdm660_vddcx_vfl,
+ [SDM660_VDDMX] = &sdm660_vddmx,
+ [SDM660_VDDMX_AO] = &sdm660_vddmx_ao,
+ [SDM660_VDDMX_VFL] = &sdm660_vddmx_vfl,
+ [SDM660_SSCCX] = &sdm660_vdd_ssccx,
+ [SDM660_SSCCX_VFL] = &sdm660_vdd_ssccx_vfl,
+ [SDM660_SSCMX] = &sdm660_vdd_sscmx,
+ [SDM660_SSCMX_VFL] = &sdm660_vdd_sscmx_vfl,
+};
+
+static const struct rpmpd_desc sdm660_desc = {
+ .rpmpds = sdm660_rpmpds,
+ .num_pds = ARRAY_SIZE(sdm660_rpmpds),
+ .max_state = RPM_SMD_LEVEL_TURBO,
+};
+
static const struct of_device_id rpmpd_match_table[] = {
+ { .compatible = "qcom,msm8916-rpmpd", .data = &msm8916_desc },
+ { .compatible = "qcom,msm8939-rpmpd", .data = &msm8939_desc },
{ .compatible = "qcom,msm8976-rpmpd", .data = &msm8976_desc },
{ .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc },
{ .compatible = "qcom,msm8998-rpmpd", .data = &msm8998_desc },
{ .compatible = "qcom,qcs404-rpmpd", .data = &qcs404_desc },
+ { .compatible = "qcom,sdm660-rpmpd", .data = &sdm660_desc },
{ }
};
MODULE_DEVICE_TABLE(of, rpmpd_match_table);
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 28c19bcb2f20..7251827bac88 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -122,7 +122,7 @@ struct smem_global_entry {
* @free_offset: index of the first unallocated byte in smem
* @available: number of bytes available for allocation
* @reserved: reserved field, must be 0
- * toc: array of references to items
+ * @toc: array of references to items
*/
struct smem_header {
struct smem_proc_comm proc_comm[4];
@@ -255,6 +255,7 @@ struct smem_region {
* processor/host
* @cacheline: list of cacheline sizes for each host
* @item_count: max accepted item number
+ * @socinfo: platform device pointer
* @num_regions: number of @regions
* @regions: list of the memory regions defining the shared memory
*/
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index 07183d731d74..2df488333be9 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -112,6 +112,7 @@ struct smp2p_entry {
* struct qcom_smp2p - device driver context
* @dev: device driver handle
* @in: pointer to the inbound smem item
+ * @out: pointer to the outbound smem item
* @smem_items: ids of the two smem items
* @valid_entries: already scanned inbound entries
* @local_pid: processor id of the inbound edge
@@ -318,15 +319,16 @@ static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
static int smp2p_update_bits(void *data, u32 mask, u32 value)
{
struct smp2p_entry *entry = data;
+ unsigned long flags;
u32 orig;
u32 val;
- spin_lock(&entry->lock);
+ spin_lock_irqsave(&entry->lock, flags);
val = orig = readl(entry->value);
val &= ~mask;
val |= value;
writel(val, entry->value);
- spin_unlock(&entry->lock);
+ spin_unlock_irqrestore(&entry->lock, flags);
if (val != orig)
qcom_smp2p_kick(entry->smp2p);
diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
index 70c3c90b997c..1d3d5e3ec2b0 100644
--- a/drivers/soc/qcom/smsm.c
+++ b/drivers/soc/qcom/smsm.c
@@ -130,7 +130,7 @@ struct smsm_host {
/**
* smsm_update_bits() - change bit in outgoing entry and inform subscribers
* @data: smsm context pointer
- * @offset: bit in the entry
+ * @mask: value mask
* @value: new value
*
* Used to set and clear the bits in the outgoing/local entry and inform
@@ -254,10 +254,8 @@ static void smsm_mask_irq(struct irq_data *irqd)
* smsm_unmask_irq() - subscribe to cascades of IRQs of a certain status bit
* @irqd: IRQ handle to be unmasked
*
-
* This subscribes the local CPU to interrupts upon changes to the defined
* status bit. The bit is also marked for cascading.
-
*/
static void smsm_unmask_irq(struct irq_data *irqd)
{
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index b44ede48decc..d21530d24253 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -218,13 +218,19 @@ static const struct soc_id soc_id[] = {
{ 251, "MSM8992" },
{ 253, "APQ8094" },
{ 291, "APQ8096" },
+ { 293, "MSM8953" },
+ { 304, "APQ8053" },
{ 305, "MSM8996SG" },
{ 310, "MSM8996AU" },
{ 311, "APQ8096AU" },
{ 312, "APQ8096SG" },
{ 318, "SDM630" },
{ 321, "SDM845" },
+ { 338, "SDM450" },
{ 341, "SDA845" },
+ { 349, "SDM632" },
+ { 350, "SDA632" },
+ { 351, "SDA450" },
{ 356, "SM8250" },
{ 402, "IPQ6018" },
{ 425, "SC7180" },
diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c
index e5c68051fb17..32bed249f90e 100644
--- a/drivers/soc/qcom/wcnss_ctrl.c
+++ b/drivers/soc/qcom/wcnss_ctrl.c
@@ -68,9 +68,8 @@ struct wcnss_msg_hdr {
u32 len;
} __packed;
-/**
+/*
* struct wcnss_version_resp - version request response
- * @hdr: common packet wcnss_msg_hdr header
*/
struct wcnss_version_resp {
struct wcnss_msg_hdr hdr;
@@ -108,9 +107,11 @@ struct wcnss_download_nv_resp {
/**
* wcnss_ctrl_smd_callback() - handler from SMD responses
- * @channel: smd channel handle
+ * @rpdev: remote processor message device pointer
* @data: pointer to the incoming data packet
* @count: size of the incoming data packet
+ * @priv: unused
+ * @addr: unused
*
* Handles any incoming packets from the remote WCNSS_CTRL service.
*/
@@ -267,6 +268,7 @@ free_req:
* @wcnss: wcnss handle, retrieved from drvdata
* @name: SMD channel name
* @cb: callback to handle incoming data on the channel
+ * @priv: private data for use in the call-back
*/
struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name, rpmsg_rx_cb_t cb, void *priv)
{
diff --git a/drivers/soc/renesas/rmobile-sysc.c b/drivers/soc/renesas/rmobile-sysc.c
index 54b616ad4a62..9046b8c933cb 100644
--- a/drivers/soc/renesas/rmobile-sysc.c
+++ b/drivers/soc/renesas/rmobile-sysc.c
@@ -57,19 +57,19 @@ static int rmobile_pd_power_down(struct generic_pm_domain *genpd)
return ret;
}
- if (__raw_readl(rmobile_pd->base + PSTR) & mask) {
+ if (readl(rmobile_pd->base + PSTR) & mask) {
unsigned int retry_count;
- __raw_writel(mask, rmobile_pd->base + SPDCR);
+ writel(mask, rmobile_pd->base + SPDCR);
for (retry_count = PSTR_RETRIES; retry_count; retry_count--) {
- if (!(__raw_readl(rmobile_pd->base + SPDCR) & mask))
+ if (!(readl(rmobile_pd->base + SPDCR) & mask))
break;
cpu_relax();
}
}
pr_debug("%s: Power off, 0x%08x -> PSTR = 0x%08x\n", genpd->name, mask,
- __raw_readl(rmobile_pd->base + PSTR));
+ readl(rmobile_pd->base + PSTR));
return 0;
}
@@ -80,13 +80,13 @@ static int __rmobile_pd_power_up(struct rmobile_pm_domain *rmobile_pd)
unsigned int retry_count;
int ret = 0;
- if (__raw_readl(rmobile_pd->base + PSTR) & mask)
+ if (readl(rmobile_pd->base + PSTR) & mask)
return ret;
- __raw_writel(mask, rmobile_pd->base + SWUCR);
+ writel(mask, rmobile_pd->base + SWUCR);
for (retry_count = 2 * PSTR_RETRIES; retry_count; retry_count--) {
- if (!(__raw_readl(rmobile_pd->base + SWUCR) & mask))
+ if (!(readl(rmobile_pd->base + SWUCR) & mask))
break;
if (retry_count > PSTR_RETRIES)
udelay(PSTR_DELAY_US);
@@ -98,7 +98,7 @@ static int __rmobile_pd_power_up(struct rmobile_pm_domain *rmobile_pd)
pr_debug("%s: Power on, 0x%08x -> PSTR = 0x%08x\n",
rmobile_pd->genpd.name, mask,
- __raw_readl(rmobile_pd->base + PSTR));
+ readl(rmobile_pd->base + PSTR));
return ret;
}
@@ -327,6 +327,7 @@ static int __init rmobile_init_pm_domains(void)
pmd = of_get_child_by_name(np, "pm-domains");
if (!pmd) {
+ iounmap(base);
pr_warn("%pOF lacks pm-domains node\n", np);
continue;
}
diff --git a/drivers/soc/rockchip/io-domain.c b/drivers/soc/rockchip/io-domain.c
index eece97f97ef8..cf8182fc3642 100644
--- a/drivers/soc/rockchip/io-domain.c
+++ b/drivers/soc/rockchip/io-domain.c
@@ -53,9 +53,6 @@
struct rockchip_iodomain;
-/**
- * @supplies: voltage settings matching the register bits.
- */
struct rockchip_iodomain_soc_data {
int grf_offset;
const char *supply_names[MAX_SUPPLIES];
@@ -547,6 +544,7 @@ static int rockchip_iodomain_probe(struct platform_device *pdev)
if (uV < 0) {
dev_err(iod->dev, "Can't determine voltage: %s\n",
supply_name);
+ ret = uV;
goto unreg_notify;
}
diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c
index 8d4d05086906..1a76eade2ed6 100644
--- a/drivers/soc/samsung/exynos-chipid.c
+++ b/drivers/soc/samsung/exynos-chipid.c
@@ -20,6 +20,7 @@ static const struct exynos_soc_id {
const char *name;
unsigned int id;
} soc_ids[] = {
+ /* List ordered by SoC name */
{ "EXYNOS3250", 0xE3472000 },
{ "EXYNOS4210", 0x43200000 }, /* EVT0 revision */
{ "EXYNOS4210", 0x43210000 },
@@ -29,10 +30,10 @@ static const struct exynos_soc_id {
{ "EXYNOS5260", 0xE5260000 },
{ "EXYNOS5410", 0xE5410000 },
{ "EXYNOS5420", 0xE5420000 },
+ { "EXYNOS5433", 0xE5433000 },
{ "EXYNOS5440", 0xE5440000 },
{ "EXYNOS5800", 0xE5422000 },
{ "EXYNOS7420", 0xE7420000 },
- { "EXYNOS5433", 0xE5433000 },
};
static const char * __init product_id_to_soc_id(unsigned int product_id)
@@ -98,9 +99,9 @@ static int __init exynos_chipid_early_init(void)
goto err;
}
- /* it is too early to use dev_info() here (soc_dev is NULL) */
- pr_info("soc soc0: Exynos: CPU[%s] PRO_ID[0x%x] REV[0x%x] Detected\n",
- soc_dev_attr->soc_id, product_id, revision);
+ dev_info(soc_device_to_device(soc_dev),
+ "Exynos: CPU[%s] PRO_ID[0x%x] REV[0x%x] Detected\n",
+ soc_dev_attr->soc_id, product_id, revision);
return 0;
@@ -110,4 +111,4 @@ err:
return ret;
}
-early_initcall(exynos_chipid_early_init);
+arch_initcall(exynos_chipid_early_init);
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index 17304fa18429..a18c93a4646c 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -8,6 +8,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
@@ -97,6 +98,10 @@ static const struct of_device_id exynos_pmu_of_device_ids[] = {
{ /*sentinel*/ },
};
+static const struct mfd_cell exynos_pmu_devs[] = {
+ { .name = "exynos-clkout", },
+};
+
struct regmap *exynos_get_pmu_regmap(void)
{
struct device_node *np = of_find_matching_node(NULL,
@@ -110,6 +115,7 @@ EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap);
static int exynos_pmu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ int ret;
pmu_base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pmu_base_addr))
@@ -128,6 +134,11 @@ static int exynos_pmu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pmu_context);
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, exynos_pmu_devs,
+ ARRAY_SIZE(exynos_pmu_devs), NULL, 0, NULL);
+ if (ret)
+ return ret;
+
if (devm_of_platform_populate(dev))
dev_err(dev, "Error populating children, reboot and poweroff might not work properly\n");
diff --git a/drivers/soc/samsung/exynos5422-asv.c b/drivers/soc/samsung/exynos5422-asv.c
index 01bb3050d678..ca409a976e34 100644
--- a/drivers/soc/samsung/exynos5422-asv.c
+++ b/drivers/soc/samsung/exynos5422-asv.c
@@ -383,7 +383,7 @@ static int __asv_offset_voltage(unsigned int index)
return 25000;
default:
return 0;
- };
+ }
}
static void exynos5422_asv_offset_voltage_setup(struct exynos_asv *asv)
diff --git a/drivers/soc/samsung/s3c-pm-check.c b/drivers/soc/samsung/s3c-pm-check.c
index ff3e099fc208..439d5c372512 100644
--- a/drivers/soc/samsung/s3c-pm-check.c
+++ b/drivers/soc/samsung/s3c-pm-check.c
@@ -151,7 +151,7 @@ static inline int in_region(void *ptr, int size, void *what, size_t whatsz)
/**
* s3c_pm_runcheck() - helper to check a resource on restore.
* @res: The resource to check
- * @vak: Pointer to list of CRC32 values to check.
+ * @val: Pointer to list of CRC32 values to check.
*
* Called from the s3c_pm_check_restore() via s3c_pm_run_sysram(), this
* function runs the given memory resource checking it against the stored
diff --git a/drivers/soc/sunxi/Kconfig b/drivers/soc/sunxi/Kconfig
index f10fd6cae13e..1fef0e711056 100644
--- a/drivers/soc/sunxi/Kconfig
+++ b/drivers/soc/sunxi/Kconfig
@@ -2,6 +2,14 @@
#
# Allwinner sunXi SoC drivers
#
+
+config SUNXI_MBUS
+ bool
+ default ARCH_SUNXI
+ help
+ Say y to enable the fixups needed to support the Allwinner
+ MBUS DMA quirks.
+
config SUNXI_SRAM
bool
default ARCH_SUNXI
diff --git a/drivers/soc/sunxi/Makefile b/drivers/soc/sunxi/Makefile
index 7816fbbec387..549159571d4f 100644
--- a/drivers/soc/sunxi/Makefile
+++ b/drivers/soc/sunxi/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_SUNXI_MBUS) += sunxi_mbus.o
obj-$(CONFIG_SUNXI_SRAM) += sunxi_sram.o
diff --git a/drivers/soc/sunxi/sunxi_mbus.c b/drivers/soc/sunxi/sunxi_mbus.c
new file mode 100644
index 000000000000..e9925c8487d7
--- /dev/null
+++ b/drivers/soc/sunxi/sunxi_mbus.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2020 Maxime Ripard <maxime@cerno.tech> */
+
+#include <linux/device.h>
+#include <linux/dma-map-ops.h>
+#include <linux/init.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+static const char * const sunxi_mbus_devices[] = {
+ /*
+ * The display engine virtual devices are not strictly speaking
+ * connected to the MBUS, but since DRM will perform all the
+ * memory allocations and DMA operations through that device, we
+ * need to have the quirk on those devices too.
+ */
+ "allwinner,sun4i-a10-display-engine",
+ "allwinner,sun5i-a10s-display-engine",
+ "allwinner,sun5i-a13-display-engine",
+ "allwinner,sun6i-a31-display-engine",
+ "allwinner,sun6i-a31s-display-engine",
+ "allwinner,sun7i-a20-display-engine",
+ "allwinner,sun8i-a23-display-engine",
+ "allwinner,sun8i-a33-display-engine",
+ "allwinner,sun8i-a83t-display-engine",
+ "allwinner,sun8i-h3-display-engine",
+ "allwinner,sun8i-r40-display-engine",
+ "allwinner,sun8i-v3s-display-engine",
+ "allwinner,sun9i-a80-display-engine",
+ "allwinner,sun50i-a64-display-engine",
+
+ /*
+ * And now we have the regular devices connected to the MBUS
+ * (that we know of).
+ */
+ "allwinner,sun4i-a10-csi1",
+ "allwinner,sun4i-a10-display-backend",
+ "allwinner,sun4i-a10-display-frontend",
+ "allwinner,sun4i-a10-video-engine",
+ "allwinner,sun5i-a13-display-backend",
+ "allwinner,sun5i-a13-video-engine",
+ "allwinner,sun6i-a31-csi",
+ "allwinner,sun6i-a31-display-backend",
+ "allwinner,sun7i-a20-csi0",
+ "allwinner,sun7i-a20-display-backend",
+ "allwinner,sun7i-a20-display-frontend",
+ "allwinner,sun7i-a20-video-engine",
+ "allwinner,sun8i-a23-display-backend",
+ "allwinner,sun8i-a23-display-frontend",
+ "allwinner,sun8i-a33-display-backend",
+ "allwinner,sun8i-a33-display-frontend",
+ "allwinner,sun8i-a33-video-engine",
+ "allwinner,sun8i-a83t-csi",
+ "allwinner,sun8i-h3-csi",
+ "allwinner,sun8i-h3-video-engine",
+ "allwinner,sun8i-v3s-csi",
+ "allwinner,sun9i-a80-display-backend",
+ "allwinner,sun50i-a64-csi",
+ "allwinner,sun50i-a64-video-engine",
+ "allwinner,sun50i-h5-video-engine",
+ NULL,
+};
+
+static int sunxi_mbus_notifier(struct notifier_block *nb,
+ unsigned long event, void *__dev)
+{
+ struct device *dev = __dev;
+ int ret;
+
+ if (event != BUS_NOTIFY_ADD_DEVICE)
+ return NOTIFY_DONE;
+
+ /*
+ * Only the devices that need a large memory bandwidth do DMA
+ * directly over the memory bus (called MBUS), instead of going
+ * through the regular system bus.
+ */
+ if (!of_device_compatible_match(dev->of_node, sunxi_mbus_devices))
+ return NOTIFY_DONE;
+
+ /*
+ * Devices with an interconnects property have the MBUS
+ * relationship described in their DT and dealt with by
+ * of_dma_configure, so we can just skip them.
+ *
+ * Older DTs, or SoCs that are not yet fully understood, still need
+ * that DMA offset to be set up here.
+ */
+ if (of_find_property(dev->of_node, "interconnects", NULL))
+ return NOTIFY_DONE;
+
+ ret = dma_direct_set_offset(dev, PHYS_OFFSET, 0, SZ_4G);
+ if (ret)
+ dev_err(dev, "Couldn't setup our DMA offset: %d\n", ret);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block sunxi_mbus_nb = {
+ .notifier_call = sunxi_mbus_notifier,
+};
+
+static const char * const sunxi_mbus_platforms[] __initconst = {
+ "allwinner,sun4i-a10",
+ "allwinner,sun5i-a10s",
+ "allwinner,sun5i-a13",
+ "allwinner,sun6i-a31",
+ "allwinner,sun7i-a20",
+ "allwinner,sun8i-a23",
+ "allwinner,sun8i-a33",
+ "allwinner,sun8i-a83t",
+ "allwinner,sun8i-h3",
+ "allwinner,sun8i-r40",
+ "allwinner,sun8i-v3",
+ "allwinner,sun8i-v3s",
+ "allwinner,sun9i-a80",
+ "allwinner,sun50i-a64",
+ "allwinner,sun50i-h5",
+ "nextthing,gr8",
+ NULL,
+};
+
+static int __init sunxi_mbus_init(void)
+{
+ if (!of_device_compatible_match(of_root, sunxi_mbus_platforms))
+ return 0;
+
+ bus_register_notifier(&platform_bus_type, &sunxi_mbus_nb);
+ return 0;
+}
+arch_initcall(sunxi_mbus_init);
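A small sketch of the address translation that dma_direct_set_offset(dev, PHYS_OFFSET, 0, SZ_4G) establishes for the matched devices: CPU physical addresses inside a 4 GiB window starting at PHYS_OFFSET are presented to the device as bus addresses starting at 0. The PHYS_OFFSET and buffer address below are hypothetical values chosen only for illustration.

#include <stdint.h>
#include <stdio.h>

#define PHYS_OFFSET	0x40000000ULL	/* hypothetical DRAM base */

/* bus address seen by an MBUS master for a given CPU physical address */
static uint64_t phys_to_mbus(uint64_t phys)
{
	return phys - PHYS_OFFSET;	/* cpu_start = PHYS_OFFSET, dma_start = 0 */
}

int main(void)
{
	uint64_t buf_phys = 0x42000000ULL;	/* hypothetical DMA buffer */

	/* prints "phys 0x42000000 -> mbus 0x2000000" */
	printf("phys 0x%llx -> mbus 0x%llx\n",
	       (unsigned long long)buf_phys,
	       (unsigned long long)phys_to_mbus(buf_phys));
	return 0;
}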
diff --git a/drivers/soc/tegra/fuse/speedo-tegra124.c b/drivers/soc/tegra/fuse/speedo-tegra124.c
index bdbf76bb184f..5b1ee28e4272 100644
--- a/drivers/soc/tegra/fuse/speedo-tegra124.c
+++ b/drivers/soc/tegra/fuse/speedo-tegra124.c
@@ -101,8 +101,7 @@ static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info,
void __init tegra124_init_speedo_data(struct tegra_sku_info *sku_info)
{
- int i, threshold, cpu_speedo_0_value, soc_speedo_0_value;
- int cpu_iddq_value, gpu_iddq_value, soc_iddq_value;
+ int i, threshold, soc_speedo_0_value;
BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) !=
THRESHOLD_INDEX_COUNT);
@@ -111,25 +110,17 @@ void __init tegra124_init_speedo_data(struct tegra_sku_info *sku_info)
BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) !=
THRESHOLD_INDEX_COUNT);
- cpu_speedo_0_value = tegra_fuse_read_early(FUSE_CPU_SPEEDO_0);
-
- /* GPU Speedo is stored in CPU_SPEEDO_2 */
- sku_info->gpu_speedo_value = tegra_fuse_read_early(FUSE_CPU_SPEEDO_2);
-
- soc_speedo_0_value = tegra_fuse_read_early(FUSE_SOC_SPEEDO_0);
-
- cpu_iddq_value = tegra_fuse_read_early(FUSE_CPU_IDDQ);
- soc_iddq_value = tegra_fuse_read_early(FUSE_SOC_IDDQ);
- gpu_iddq_value = tegra_fuse_read_early(FUSE_GPU_IDDQ);
-
- sku_info->cpu_speedo_value = cpu_speedo_0_value;
-
+ sku_info->cpu_speedo_value = tegra_fuse_read_early(FUSE_CPU_SPEEDO_0);
if (sku_info->cpu_speedo_value == 0) {
pr_warn("Tegra Warning: Speedo value not fused.\n");
WARN_ON(1);
return;
}
+ /* GPU Speedo is stored in CPU_SPEEDO_2 */
+ sku_info->gpu_speedo_value = tegra_fuse_read_early(FUSE_CPU_SPEEDO_2);
+ soc_speedo_0_value = tegra_fuse_read_early(FUSE_SOC_SPEEDO_0);
+
rev_sku_to_speedo_ids(sku_info, &threshold);
sku_info->cpu_iddq_value = tegra_fuse_read_early(FUSE_CPU_IDDQ);
diff --git a/drivers/soc/tegra/fuse/speedo-tegra210.c b/drivers/soc/tegra/fuse/speedo-tegra210.c
index 70d3f6e1aa33..695d0b7f9a8a 100644
--- a/drivers/soc/tegra/fuse/speedo-tegra210.c
+++ b/drivers/soc/tegra/fuse/speedo-tegra210.c
@@ -94,7 +94,7 @@ static int get_process_id(int value, const u32 *speedos, unsigned int num)
unsigned int i;
for (i = 0; i < num; i++)
- if (value < speedos[num])
+ if (value < speedos[i])
return i;
return -EINVAL;
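The one-character fix above (indexing the threshold table with the loop
variable instead of its length) restores the intended lookup: each table
entry is an upper bound, and the first bound the value falls under
selects the process corner. A minimal standalone sketch with
hypothetical names:

	/* illustration only: return the first bucket whose bound exceeds value */
	static int example_process_id(int value, const unsigned int *bounds,
				      unsigned int num)
	{
		unsigned int i;

		for (i = 0; i < num; i++)
			if (value < bounds[i])	/* entry i, not entry num */
				return i;

		return -1;	/* value above every bound */
	}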
@@ -102,7 +102,7 @@ static int get_process_id(int value, const u32 *speedos, unsigned int num)
void __init tegra210_init_speedo_data(struct tegra_sku_info *sku_info)
{
- int cpu_speedo[3], soc_speedo[3], cpu_iddq, gpu_iddq, soc_iddq;
+ int cpu_speedo[3], soc_speedo[3];
unsigned int index;
u8 speedo_revision;
@@ -122,10 +122,6 @@ void __init tegra210_init_speedo_data(struct tegra_sku_info *sku_info)
soc_speedo[1] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_1);
soc_speedo[2] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_2);
- cpu_iddq = tegra_fuse_read_early(FUSE_CPU_IDDQ) * 4;
- soc_iddq = tegra_fuse_read_early(FUSE_SOC_IDDQ) * 4;
- gpu_iddq = tegra_fuse_read_early(FUSE_GPU_IDDQ) * 5;
-
/*
* Determine CPU, GPU and SoC speedo values depending on speedo fusing
* revision. Note that GPU speedo value is fused in CPU_SPEEDO_2.
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index f5b82ffa637b..7e2fb1c16af1 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -1,22 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-# 64-bit ARM SoCs from TI
-if ARM64
-
-if ARCH_K3
-
-config ARCH_K3_AM6_SOC
- bool "K3 AM6 SoC"
- help
- Enable support for TI's AM6 SoC Family support
-
-config ARCH_K3_J721E_SOC
- bool "K3 J721E SoC"
- help
- Enable support for TI's J721E SoC Family support
-
-endif
-
-endif
#
# TI SOC drivers
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
index 1147dc4c1d59..119164abcb41 100644
--- a/drivers/soc/ti/k3-ringacc.c
+++ b/drivers/soc/ti/k3-ringacc.c
@@ -137,10 +137,12 @@ struct k3_ring_state {
* @elm_size: Size of the ring element
* @mode: Ring mode
* @flags: flags
+ * @state: Ring state
* @ring_id: Ring Id
* @parent: Pointer on struct @k3_ringacc
* @use_count: Use count for shared rings
* @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY)
+ * @dma_dev: device to be used for DMA API (allocation, mapping)
*/
struct k3_ring {
struct k3_ring_rt_regs __iomem *rt;
@@ -160,6 +162,7 @@ struct k3_ring {
struct k3_ringacc *parent;
u32 use_count;
int proxy_id;
+ struct device *dma_dev;
};
struct k3_ringacc_ops {
@@ -365,20 +368,16 @@ EXPORT_SYMBOL_GPL(k3_ringacc_request_rings_pair);
static void k3_ringacc_ring_reset_sci(struct k3_ring *ring)
{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
struct k3_ringacc *ringacc = ring->parent;
int ret;
- ret = ringacc->tisci_ring_ops->config(
- ringacc->tisci,
- TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID,
- ringacc->tisci_dev_id,
- ring->ring_id,
- 0,
- 0,
- ring->size,
- 0,
- 0,
- 0);
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID;
+ ring_cfg.count = ring->size;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
if (ret)
dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
ret, ring->ring_id);
@@ -398,20 +397,16 @@ EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset);
static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring,
enum k3_ring_mode mode)
{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
struct k3_ringacc *ringacc = ring->parent;
int ret;
- ret = ringacc->tisci_ring_ops->config(
- ringacc->tisci,
- TI_SCI_MSG_VALUE_RM_RING_MODE_VALID,
- ringacc->tisci_dev_id,
- ring->ring_id,
- 0,
- 0,
- 0,
- mode,
- 0,
- 0);
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_RING_MODE_VALID;
+ ring_cfg.mode = mode;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
if (ret)
dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
ret, ring->ring_id);
@@ -478,20 +473,15 @@ EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma);
static void k3_ringacc_ring_free_sci(struct k3_ring *ring)
{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
struct k3_ringacc *ringacc = ring->parent;
int ret;
- ret = ringacc->tisci_ring_ops->config(
- ringacc->tisci,
- TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
- ringacc->tisci_dev_id,
- ring->ring_id,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0);
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
if (ret)
dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
ret, ring->ring_id);
@@ -521,11 +511,12 @@ int k3_ringacc_ring_free(struct k3_ring *ring)
k3_ringacc_ring_free_sci(ring);
- dma_free_coherent(ringacc->dev,
+ dma_free_coherent(ring->dma_dev,
ring->size * (4 << ring->elm_size),
ring->ring_mem_virt, ring->ring_mem_dma);
ring->flags = 0;
ring->ops = NULL;
+ ring->dma_dev = NULL;
if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
clear_bit(ring->proxy_id, ringacc->proxy_inuse);
ring->proxy = NULL;
@@ -575,28 +566,26 @@ EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num);
static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring)
{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
struct k3_ringacc *ringacc = ring->parent;
- u32 ring_idx;
int ret;
if (!ringacc->tisci)
return -EINVAL;
- ring_idx = ring->ring_id;
- ret = ringacc->tisci_ring_ops->config(
- ringacc->tisci,
- TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
- ringacc->tisci_dev_id,
- ring_idx,
- lower_32_bits(ring->ring_mem_dma),
- upper_32_bits(ring->ring_mem_dma),
- ring->size,
- ring->mode,
- ring->elm_size,
- 0);
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER;
+ ring_cfg.addr_lo = lower_32_bits(ring->ring_mem_dma);
+ ring_cfg.addr_hi = upper_32_bits(ring->ring_mem_dma);
+ ring_cfg.count = ring->size;
+ ring_cfg.mode = ring->mode;
+ ring_cfg.size = ring->elm_size;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
if (ret)
dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
- ret, ring_idx);
+ ret, ring->ring_id);
return ret;
}
@@ -648,8 +637,12 @@ int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
switch (ring->mode) {
case K3_RINGACC_RING_MODE_RING:
ring->ops = &k3_ring_mode_ring_ops;
+ ring->dma_dev = cfg->dma_dev;
+ if (!ring->dma_dev)
+ ring->dma_dev = ringacc->dev;
break;
case K3_RINGACC_RING_MODE_MESSAGE:
+ ring->dma_dev = ringacc->dev;
if (ring->proxy)
ring->ops = &k3_ring_mode_proxy_ops;
else
@@ -661,9 +654,9 @@ int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
goto err_free_proxy;
}
- ring->ring_mem_virt = dma_alloc_coherent(ringacc->dev,
- ring->size * (4 << ring->elm_size),
- &ring->ring_mem_dma, GFP_KERNEL);
+ ring->ring_mem_virt = dma_alloc_coherent(ring->dma_dev,
+ ring->size * (4 << ring->elm_size),
+ &ring->ring_mem_dma, GFP_KERNEL);
if (!ring->ring_mem_virt) {
dev_err(ringacc->dev, "Failed to alloc ring mem\n");
ret = -ENOMEM;
@@ -684,12 +677,13 @@ int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
return 0;
err_free_mem:
- dma_free_coherent(ringacc->dev,
+ dma_free_coherent(ring->dma_dev,
ring->size * (4 << ring->elm_size),
ring->ring_mem_virt,
ring->ring_mem_dma);
err_free_ops:
ring->ops = NULL;
+ ring->dma_dev = NULL;
err_free_proxy:
ring->proxy = NULL;
return ret;
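With the hunks above, ring memory is allocated against a per-ring DMA
device rather than the ring accelerator itself, so a client can route
coherent allocations through the device that actually masters the
transfers. A hedged sketch of a caller, assuming k3_ring_cfg carries
size/elm_size/mode alongside the new dma_dev field and using a
placeholder udma_dev pointer:

	struct k3_ring_cfg ring_cfg = {
		.size = 128,
		.elm_size = K3_RINGACC_RING_ELSIZE_8,
		.mode = K3_RINGACC_RING_MODE_RING,
		.dma_dev = udma_dev,	/* placeholder: device that owns the DMA mappings */
	};
	int ret;

	/* in RING mode the hunk above falls back to ringacc->dev when dma_dev is NULL */
	ret = k3_ringacc_ring_cfg(ring, &ring_cfg);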
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
index bbbc2d2b7091..fd91129de6e5 100644
--- a/drivers/soc/ti/k3-socinfo.c
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -40,6 +40,7 @@ static const struct k3_soc_id {
{ 0xBB5A, "AM65X" },
{ 0xBB64, "J721E" },
{ 0xBB6D, "J7200" },
+ { 0xBB38, "AM64X" }
};
static int
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index 8c863ecb1c60..7b5cb5d48f7d 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -500,7 +500,7 @@ EXPORT_SYMBOL_GPL(knav_dma_open_channel);
/**
* knav_dma_close_channel() - Destroy a dma channel
*
- * channel: dma channel handle
+ * @channel: dma channel handle
*
*/
void knav_dma_close_channel(void *channel)
@@ -749,8 +749,9 @@ static int knav_dma_probe(struct platform_device *pdev)
pm_runtime_enable(kdev->dev);
ret = pm_runtime_get_sync(kdev->dev);
if (ret < 0) {
+ pm_runtime_put_noidle(kdev->dev);
dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret);
- return ret;
+ goto err_pm_disable;
}
/* Initialise all packet dmas */
@@ -764,7 +765,8 @@ static int knav_dma_probe(struct platform_device *pdev)
if (list_empty(&kdev->list)) {
dev_err(dev, "no valid dma instance\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_put_sync;
}
debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL,
@@ -772,6 +774,13 @@ static int knav_dma_probe(struct platform_device *pdev)
device_ready = true;
return ret;
+
+err_put_sync:
+ pm_runtime_put_sync(kdev->dev);
+err_pm_disable:
+ pm_runtime_disable(kdev->dev);
+
+ return ret;
}
static int knav_dma_remove(struct platform_device *pdev)
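The error path added to knav_dma_probe() (and the matching one in
knav_qmss_queue.c further down) follows the usual runtime PM rule: a
failed pm_runtime_get_sync() still raises the usage count, so it must be
dropped with pm_runtime_put_noidle(), and pm_runtime_enable() needs a
matching pm_runtime_disable() on every failure exit. A minimal sketch of
the pattern with a hypothetical probe helper:

	#include <linux/pm_runtime.h>

	static int example_probe_pm(struct device *dev)
	{
		int ret;

		pm_runtime_enable(dev);
		ret = pm_runtime_get_sync(dev);
		if (ret < 0) {
			pm_runtime_put_noidle(dev);	/* drop the reference taken even on failure */
			pm_runtime_disable(dev);
			return ret;
		}

		/* ... resource setup; put the device again once it goes idle ... */
		return 0;
	}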
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index a460f201bf8e..2e521f1eda96 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -79,7 +79,7 @@ EXPORT_SYMBOL_GPL(knav_qmss_device_ready);
/**
 * knav_queue_notify: qmss queue notifier call
*
- * @inst: qmss queue instance like accumulator
+ * @inst: - qmss queue instance like accumulator
*/
void knav_queue_notify(struct knav_queue_inst *inst)
{
@@ -511,10 +511,10 @@ static int knav_queue_flush(struct knav_queue *qh)
/**
* knav_queue_open() - open a hardware queue
- * @name - name to give the queue handle
- * @id - desired queue number if any or specifes the type
+ * @name: - name to give the queue handle
+ * @id: - desired queue number if any or specifies the type
* of queue
- * @flags - the following flags are applicable to queues:
+ * @flags: - the following flags are applicable to queues:
* KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
* exclusive by default.
* Subsequent attempts to open a shared queue should
@@ -545,7 +545,7 @@ EXPORT_SYMBOL_GPL(knav_queue_open);
/**
* knav_queue_close() - close a hardware queue handle
- * @qh - handle to close
+ * @qhandle: - handle to close
*/
void knav_queue_close(void *qhandle)
{
@@ -572,9 +572,9 @@ EXPORT_SYMBOL_GPL(knav_queue_close);
/**
* knav_queue_device_control() - Perform control operations on a queue
- * @qh - queue handle
- * @cmd - control commands
- * @arg - command argument
+ * @qhandle: - queue handle
+ * @cmd: - control commands
+ * @arg: - command argument
*
* Returns 0 on success, errno otherwise.
*/
@@ -623,10 +623,10 @@ EXPORT_SYMBOL_GPL(knav_queue_device_control);
/**
* knav_queue_push() - push data (or descriptor) to the tail of a queue
- * @qh - hardware queue handle
- * @data - data to push
- * @size - size of data to push
- * @flags - can be used to pass additional information
+ * @qhandle: - hardware queue handle
+ * @dma: - DMA data to push
+ * @size: - size of data to push
+ * @flags: - can be used to pass additional information
*
* Returns 0 on success, errno otherwise.
*/
@@ -646,8 +646,8 @@ EXPORT_SYMBOL_GPL(knav_queue_push);
/**
* knav_queue_pop() - pop data (or descriptor) from the head of a queue
- * @qh - hardware queue handle
- * @size - (optional) size of the data pop'ed.
+ * @qhandle: - hardware queue handle
+ * @size: - (optional) size of the data popped.
*
* Returns a DMA address on success, 0 on failure.
*/
@@ -746,9 +746,9 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);
/**
* knav_pool_create() - Create a pool of descriptors
- * @name - name to give the pool handle
- * @num_desc - numbers of descriptors in the pool
- * @region_id - QMSS region id from which the descriptors are to be
+ * @name: - name to give the pool handle
+ * @num_desc: - number of descriptors in the pool
+ * @region_id: - QMSS region id from which the descriptors are to be
* allocated.
*
* Returns a pool handle on success.
@@ -856,7 +856,7 @@ EXPORT_SYMBOL_GPL(knav_pool_create);
/**
* knav_pool_destroy() - Free a pool of descriptors
- * @pool - pool handle
+ * @ph: - pool handle
*/
void knav_pool_destroy(void *ph)
{
@@ -884,7 +884,7 @@ EXPORT_SYMBOL_GPL(knav_pool_destroy);
/**
* knav_pool_desc_get() - Get a descriptor from the pool
- * @pool - pool handle
+ * @ph: - pool handle
*
* Returns descriptor from the pool.
*/
@@ -905,7 +905,8 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_get);
/**
* knav_pool_desc_put() - return a descriptor to the pool
- * @pool - pool handle
+ * @ph: - pool handle
+ * @desc: - virtual address
*/
void knav_pool_desc_put(void *ph, void *desc)
{
@@ -918,11 +919,11 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_put);
/**
* knav_pool_desc_map() - Map descriptor for DMA transfer
- * @pool - pool handle
- * @desc - address of descriptor to map
- * @size - size of descriptor to map
- * @dma - DMA address return pointer
- * @dma_sz - adjusted return pointer
+ * @ph: - pool handle
+ * @desc: - address of descriptor to map
+ * @size: - size of descriptor to map
+ * @dma: - DMA address return pointer
+ * @dma_sz: - adjusted return pointer
*
* Returns 0 on success, errno otherwise.
*/
@@ -945,9 +946,9 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_map);
/**
* knav_pool_desc_unmap() - Unmap descriptor after DMA transfer
- * @pool - pool handle
- * @dma - DMA address of descriptor to unmap
- * @dma_sz - size of descriptor to unmap
+ * @ph: - pool handle
+ * @dma: - DMA address of descriptor to unmap
+ * @dma_sz: - size of descriptor to unmap
*
* Returns descriptor address on success, Use IS_ERR_OR_NULL() to identify
* error values on return.
@@ -968,7 +969,7 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);
/**
* knav_pool_count() - Get the number of descriptors in pool.
- * @pool - pool handle
+ * @ph: - pool handle
* Returns number of elements in the pool.
*/
int knav_pool_count(void *ph)
@@ -1307,12 +1308,11 @@ static int knav_setup_queue_pools(struct knav_device *kdev,
struct device_node *queue_pools)
{
struct device_node *type, *range;
- int ret;
for_each_child_of_node(queue_pools, type) {
for_each_child_of_node(type, range) {
- ret = knav_setup_queue_range(kdev, range);
/* return value ignored, we init the rest... */
+ knav_setup_queue_range(kdev, range);
}
}
@@ -1784,6 +1784,7 @@ static int knav_queue_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
dev_err(dev, "Failed to enable QMSS\n");
return ret;
}
@@ -1851,9 +1852,10 @@ static int knav_queue_probe(struct platform_device *pdev)
if (ret)
goto err;
- regions = of_get_child_by_name(node, "descriptor-regions");
+ regions = of_get_child_by_name(node, "descriptor-regions");
if (!regions) {
dev_err(dev, "descriptor-regions not specified\n");
+ ret = -ENODEV;
goto err;
}
ret = knav_queue_setup_regions(kdev, regions);
diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c
index 980b04c38fd9..f97f629b6b6b 100644
--- a/drivers/soc/ti/omap_prm.c
+++ b/drivers/soc/ti/omap_prm.c
@@ -7,6 +7,7 @@
*/
#include <linux/kernel.h>
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -14,6 +15,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/delay.h>
@@ -41,6 +43,7 @@ struct omap_prm_domain {
u16 pwrstst;
const struct omap_prm_domain_map *cap;
u32 pwrstctrl_saved;
+ unsigned int uses_pm_clk:1;
};
struct omap_rst_map {
@@ -121,6 +124,10 @@ static const struct omap_prm_domain_map omap_prm_onoff_noauto = {
.statechange = 1,
};
+static const struct omap_prm_domain_map omap_prm_alwon = {
+ .usable_modes = BIT(OMAP_PRMD_ON_ACTIVE),
+};
+
static const struct omap_rst_map rst_map_0[] = {
{ .rst = 0, .st = 0 },
{ .rst = -1 },
@@ -187,14 +194,40 @@ static const struct omap_rst_map am3_wkup_rst_map[] = {
};
static const struct omap_prm_data am3_prm_data[] = {
- { .name = "per", .base = 0x44e00c00, .rstctrl = 0x0, .rstmap = am3_per_rst_map, .flags = OMAP_PRM_HAS_RSTCTRL, .clkdm_name = "pruss_ocp" },
- { .name = "wkup", .base = 0x44e00d00, .rstctrl = 0x0, .rstst = 0xc, .rstmap = am3_wkup_rst_map, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
- { .name = "device", .base = 0x44e00f00, .rstctrl = 0x0, .rstst = 0x8, .rstmap = rst_map_01, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
+ {
+ .name = "per", .base = 0x44e00c00,
+ .pwrstctrl = 0xc, .pwrstst = 0x8, .dmap = &omap_prm_noinact,
+ .rstctrl = 0x0, .rstmap = am3_per_rst_map,
+ .flags = OMAP_PRM_HAS_RSTCTRL, .clkdm_name = "pruss_ocp"
+ },
+ {
+ .name = "wkup", .base = 0x44e00d00,
+ .pwrstctrl = 0x4, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ .rstctrl = 0x0, .rstst = 0xc, .rstmap = am3_wkup_rst_map,
+ .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM
+ },
+ {
+ .name = "mpu", .base = 0x44e00e00,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
+ },
+ {
+ .name = "device", .base = 0x44e00f00,
+ .rstctrl = 0x0, .rstst = 0x8, .rstmap = rst_map_01,
+ .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM
+ },
+ {
+ .name = "rtc", .base = 0x44e01000,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
{
.name = "gfx", .base = 0x44e01100,
.pwrstctrl = 0, .pwrstst = 0x10, .dmap = &omap_prm_noinact,
.rstctrl = 0x4, .rstst = 0x14, .rstmap = rst_map_0, .clkdm_name = "gfx_l3",
},
+ {
+ .name = "cefuse", .base = 0x44e01200,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
{ },
};
@@ -325,6 +358,38 @@ static int omap_prm_domain_power_off(struct generic_pm_domain *domain)
return 0;
}
+/*
+ * Note that ti-sysc already manages the module clocks separately, so
+ * there is no need to manage those here. Interconnect instances do,
+ * however, need their clocks managed for simple-pm-bus.
+ */
+static int omap_prm_domain_attach_clock(struct device *dev,
+ struct omap_prm_domain *prmd)
+{
+ struct device_node *np = dev->of_node;
+ int error;
+
+ if (!of_device_is_compatible(np, "simple-pm-bus"))
+ return 0;
+
+ if (!of_property_read_bool(np, "clocks"))
+ return 0;
+
+ error = pm_clk_create(dev);
+ if (error)
+ return error;
+
+ error = of_pm_clk_add_clks(dev);
+ if (error < 0) {
+ pm_clk_destroy(dev);
+ return error;
+ }
+
+ prmd->uses_pm_clk = 1;
+
+ return 0;
+}
+
static int omap_prm_domain_attach_dev(struct generic_pm_domain *domain,
struct device *dev)
{
@@ -349,6 +414,10 @@ static int omap_prm_domain_attach_dev(struct generic_pm_domain *domain,
genpd_data = dev_gpd_data(dev);
genpd_data->data = NULL;
+ ret = omap_prm_domain_attach_clock(dev, prmd);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -356,7 +425,11 @@ static void omap_prm_domain_detach_dev(struct generic_pm_domain *domain,
struct device *dev)
{
struct generic_pm_domain_data *genpd_data;
+ struct omap_prm_domain *prmd;
+ prmd = genpd_to_prm_domain(domain);
+ if (prmd->uses_pm_clk)
+ pm_clk_destroy(dev);
genpd_data = dev_gpd_data(dev);
genpd_data->data = NULL;
}
@@ -393,6 +466,7 @@ static int omap_prm_domain_init(struct device *dev, struct omap_prm *prm)
prmd->pd.power_off = omap_prm_domain_power_off;
prmd->pd.attach_dev = omap_prm_domain_attach_dev;
prmd->pd.detach_dev = omap_prm_domain_detach_dev;
+ prmd->pd.flags = GENPD_FLAG_PM_CLK;
pm_genpd_init(&prmd->pd, NULL, true);
error = of_genpd_add_provider_simple(np, &prmd->pd);
@@ -484,6 +558,10 @@ static int omap_reset_deassert(struct reset_controller_dev *rcdev,
struct ti_prm_platform_data *pdata = dev_get_platdata(reset->dev);
int ret = 0;
+ /* Nothing to do if the reset is already deasserted */
+ if (!omap_reset_status(rcdev, id))
+ return 0;
+
has_rstst = reset->prm->data->rstst ||
(reset->prm->data->flags & OMAP_PRM_HAS_RSTST);
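Setting GENPD_FLAG_PM_CLK on the domain, together with the per-device
clock list built in omap_prm_domain_attach_clock(), lets the genpd core
gate those interconnect clocks around runtime suspend and resume of the
attached simple-pm-bus device. A hedged sketch of the same pm_clk
bookkeeping for a generic device, using only helpers already present in
the hunks above:

	#include <linux/pm_clock.h>

	static int example_add_bus_clocks(struct device *dev)
	{
		int error;

		error = pm_clk_create(dev);		/* allocate the per-device clock list */
		if (error)
			return error;

		error = of_pm_clk_add_clks(dev);	/* add every "clocks" entry from the DT node */
		if (error < 0) {
			pm_clk_destroy(dev);
			return error;
		}

		return 0;
	}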
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index d2f5e7001a93..64f3e3105540 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -19,6 +19,7 @@
#include <linux/of_address.h>
#include <linux/platform_data/pm33xx.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/rtc.h>
#include <linux/rtc/rtc-omap.h>
#include <linux/sizes.h>
@@ -135,13 +136,11 @@ static int am33xx_push_sram_idle(void)
static int am33xx_do_sram_idle(u32 wfi_flags)
{
- int ret = 0;
-
if (!m3_ipc || !pm_ops)
return 0;
if (wfi_flags & WFI_FLAG_WAKE_M3)
- ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_IDLE);
+ m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_IDLE);
return pm_ops->cpu_suspend(am33xx_do_wfi_sram, wfi_flags);
}
@@ -555,16 +554,26 @@ static int am33xx_pm_probe(struct platform_device *pdev)
suspend_wfi_flags |= WFI_FLAG_WAKE_M3;
#endif /* CONFIG_SUSPEND */
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ goto err_pm_runtime_disable;
+ }
+
ret = pm_ops->init(am33xx_do_sram_idle);
if (ret) {
dev_err(dev, "Unable to call core pm init!\n");
ret = -ENODEV;
- goto err_put_wkup_m3_ipc;
+ goto err_pm_runtime_put;
}
return 0;
-err_put_wkup_m3_ipc:
+err_pm_runtime_put:
+ pm_runtime_put_sync(dev);
+err_pm_runtime_disable:
+ pm_runtime_disable(dev);
wkup_m3_ipc_put(m3_ipc);
err_free_sram:
am33xx_pm_free_sram();
@@ -574,6 +583,8 @@ err_free_sram:
static int am33xx_pm_remove(struct platform_device *pdev)
{
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
if (pm_ops->deinit)
pm_ops->deinit();
suspend_set_ops(NULL);
diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c
index cc0b4ad7a3d3..5d6e7132a5c4 100644
--- a/drivers/soc/ti/pruss.c
+++ b/drivers/soc/ti/pruss.c
@@ -126,8 +126,6 @@ static int pruss_clk_init(struct pruss *pruss, struct device_node *cfg_node)
int ret = 0;
data = of_device_get_match_data(dev);
- if (IS_ERR(data))
- return -ENODEV;
clks_np = of_get_child_by_name(cfg_node, "clocks");
if (!clks_np) {
@@ -175,10 +173,6 @@ static int pruss_probe(struct platform_device *pdev)
const char *mem_names[PRUSS_MEM_MAX] = { "dram0", "dram1", "shrdram2" };
data = of_device_get_match_data(&pdev->dev);
- if (IS_ERR(data)) {
- dev_err(dev, "missing private data\n");
- return -ENODEV;
- }
ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
if (ret) {
diff --git a/drivers/soc/ti/ti_sci_inta_msi.c b/drivers/soc/ti/ti_sci_inta_msi.c
index 0eb9462f609e..a1d9c027022a 100644
--- a/drivers/soc/ti/ti_sci_inta_msi.c
+++ b/drivers/soc/ti/ti_sci_inta_msi.c
@@ -89,6 +89,18 @@ static int ti_sci_inta_msi_alloc_descs(struct device *dev,
list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
count++;
}
+ for (i = 0; i < res->desc[set].num_sec; i++) {
+ msi_desc = alloc_msi_entry(dev, 1, NULL);
+ if (!msi_desc) {
+ ti_sci_inta_msi_free_descs(dev);
+ return -ENOMEM;
+ }
+
+ msi_desc->inta.dev_index = res->desc[set].start_sec + i;
+ INIT_LIST_HEAD(&msi_desc->list);
+ list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
+ count++;
+ }
}
return count;
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index e9ece45d7a33..c3e2161df732 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -218,6 +218,7 @@ static int wkup_m3_is_available(struct wkup_m3_ipc *m3_ipc)
/* Public functions */
/**
* wkup_m3_set_mem_type - Pass wkup_m3 which type of memory is in use
+ * @m3_ipc: Pointer to wkup_m3_ipc context
* @mem_type: memory type value read directly from emif
*
* wkup_m3 must know what memory type is in use to properly suspend
@@ -230,6 +231,7 @@ static void wkup_m3_set_mem_type(struct wkup_m3_ipc *m3_ipc, int mem_type)
/**
* wkup_m3_set_resume_address - Pass wkup_m3 resume address
+ * @m3_ipc: Pointer to wkup_m3_ipc context
* @addr: Physical address from which resume code should execute
*/
static void wkup_m3_set_resume_address(struct wkup_m3_ipc *m3_ipc, void *addr)
@@ -239,6 +241,7 @@ static void wkup_m3_set_resume_address(struct wkup_m3_ipc *m3_ipc, void *addr)
/**
* wkup_m3_request_pm_status - Retrieve wkup_m3 status code after suspend
+ * @m3_ipc: Pointer to wkup_m3_ipc context
*
* Returns code representing the status of a low power mode transition.
* 0 - Successful transition
@@ -260,6 +263,7 @@ static int wkup_m3_request_pm_status(struct wkup_m3_ipc *m3_ipc)
/**
* wkup_m3_prepare_low_power - Request preparation for transition to
* low power state
+ * @m3_ipc: Pointer to wkup_m3_ipc context
* @state: A kernel suspend state to enter, either MEM or STANDBY
*
* Returns 0 if preparation was successful, otherwise returns error code
@@ -315,6 +319,7 @@ static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
/**
* wkup_m3_finish_low_power - Return m3 to reset state
+ * @m3_ipc: Pointer to wkup_m3_ipc context
*
* Returns 0 if reset was successful, otherwise returns error code
*/
@@ -362,8 +367,7 @@ static const char *wkup_m3_request_wake_src(struct wkup_m3_ipc *m3_ipc)
/**
* wkup_m3_set_rtc_only - Set the rtc_only flag
- * @wkup_m3_wakeup: struct wkup_m3_wakeup_src * gets assigned the
- * wakeup src value
+ * @m3_ipc: Pointer to wkup_m3_ipc context
*/
static void wkup_m3_set_rtc_only(struct wkup_m3_ipc *m3_ipc)
{
diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig
index 646512d7276f..0b1708dae361 100644
--- a/drivers/soc/xilinx/Kconfig
+++ b/drivers/soc/xilinx/Kconfig
@@ -4,6 +4,7 @@ menu "Xilinx SoC drivers"
config XILINX_VCU
tristate "Xilinx VCU logicoreIP Init"
depends on HAS_IOMEM
+ select REGMAP_MMIO
help
Provides the driver to enable and disable the isolation between the
processing system and programmable logic part by using the logicoreIP
diff --git a/drivers/soc/xilinx/xlnx_vcu.c b/drivers/soc/xilinx/xlnx_vcu.c
index a3aa40996f13..14daad4efc58 100644
--- a/drivers/soc/xilinx/xlnx_vcu.c
+++ b/drivers/soc/xilinx/xlnx_vcu.c
@@ -10,39 +10,12 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/xlnx-vcu.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-
-/* Address map for different registers implemented in the VCU LogiCORE IP. */
-#define VCU_ECODER_ENABLE 0x00
-#define VCU_DECODER_ENABLE 0x04
-#define VCU_MEMORY_DEPTH 0x08
-#define VCU_ENC_COLOR_DEPTH 0x0c
-#define VCU_ENC_VERTICAL_RANGE 0x10
-#define VCU_ENC_FRAME_SIZE_X 0x14
-#define VCU_ENC_FRAME_SIZE_Y 0x18
-#define VCU_ENC_COLOR_FORMAT 0x1c
-#define VCU_ENC_FPS 0x20
-#define VCU_MCU_CLK 0x24
-#define VCU_CORE_CLK 0x28
-#define VCU_PLL_BYPASS 0x2c
-#define VCU_ENC_CLK 0x30
-#define VCU_PLL_CLK 0x34
-#define VCU_ENC_VIDEO_STANDARD 0x38
-#define VCU_STATUS 0x3c
-#define VCU_AXI_ENC_CLK 0x40
-#define VCU_AXI_DEC_CLK 0x44
-#define VCU_AXI_MCU_CLK 0x48
-#define VCU_DEC_VIDEO_STANDARD 0x4c
-#define VCU_DEC_FRAME_SIZE_X 0x50
-#define VCU_DEC_FRAME_SIZE_Y 0x54
-#define VCU_DEC_FPS 0x58
-#define VCU_BUFFER_B_FRAME 0x5c
-#define VCU_WPP_EN 0x60
-#define VCU_PLL_CLK_DEC 0x64
-#define VCU_GASKET_INIT 0x74
-#define VCU_GASKET_VALUE 0x03
+#include <linux/regmap.h>
/* vcu slcr registers, bitmask and shift */
#define VCU_PLL_CTRL 0x24
@@ -106,11 +79,20 @@ struct xvcu_device {
struct device *dev;
struct clk *pll_ref;
struct clk *aclk;
- void __iomem *logicore_reg_ba;
+ struct regmap *logicore_reg_ba;
void __iomem *vcu_slcr_ba;
u32 coreclk;
};
+static struct regmap_config vcu_settings_regmap_config = {
+ .name = "regmap",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0xfff,
+ .cache_type = REGCACHE_NONE,
+};
+
/**
* struct xvcu_pll_cfg - Helper data
* @fbdiv: The integer portion of the feedback divider to the PLL
@@ -300,10 +282,12 @@ static int xvcu_set_vcu_pll_info(struct xvcu_device *xvcu)
int ret, i;
const struct xvcu_pll_cfg *found = NULL;
- inte = xvcu_read(xvcu->logicore_reg_ba, VCU_PLL_CLK);
- deci = xvcu_read(xvcu->logicore_reg_ba, VCU_PLL_CLK_DEC);
- coreclk = xvcu_read(xvcu->logicore_reg_ba, VCU_CORE_CLK) * MHZ;
- mcuclk = xvcu_read(xvcu->logicore_reg_ba, VCU_MCU_CLK) * MHZ;
+ regmap_read(xvcu->logicore_reg_ba, VCU_PLL_CLK, &inte);
+ regmap_read(xvcu->logicore_reg_ba, VCU_PLL_CLK_DEC, &deci);
+ regmap_read(xvcu->logicore_reg_ba, VCU_CORE_CLK, &coreclk);
+ coreclk *= MHZ;
+ regmap_read(xvcu->logicore_reg_ba, VCU_MCU_CLK, &mcuclk);
+ mcuclk *= MHZ;
if (!mcuclk || !coreclk) {
dev_err(xvcu->dev, "Invalid mcu and core clock data\n");
return -EINVAL;
@@ -498,6 +482,7 @@ static int xvcu_probe(struct platform_device *pdev)
{
struct resource *res;
struct xvcu_device *xvcu;
+ void __iomem *regs;
int ret;
xvcu = devm_kzalloc(&pdev->dev, sizeof(*xvcu), GFP_KERNEL);
@@ -518,17 +503,32 @@ static int xvcu_probe(struct platform_device *pdev)
return -ENOMEM;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "logicore");
- if (!res) {
- dev_err(&pdev->dev, "get logicore memory resource failed.\n");
- return -ENODEV;
- }
+ xvcu->logicore_reg_ba =
+ syscon_regmap_lookup_by_compatible("xlnx,vcu-settings");
+ if (IS_ERR(xvcu->logicore_reg_ba)) {
+ dev_info(&pdev->dev,
+ "could not find xlnx,vcu-settings: trying direct register access\n");
+
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "logicore");
+ if (!res) {
+ dev_err(&pdev->dev, "get logicore memory resource failed.\n");
+ return -ENODEV;
+ }
- xvcu->logicore_reg_ba = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!xvcu->logicore_reg_ba) {
- dev_err(&pdev->dev, "logicore register mapping failed.\n");
- return -ENOMEM;
+ regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!regs) {
+ dev_err(&pdev->dev, "logicore register mapping failed.\n");
+ return -ENOMEM;
+ }
+
+ xvcu->logicore_reg_ba =
+ devm_regmap_init_mmio(&pdev->dev, regs,
+ &vcu_settings_regmap_config);
+ if (IS_ERR(xvcu->logicore_reg_ba)) {
+ dev_err(&pdev->dev, "failed to init regmap\n");
+ return PTR_ERR(xvcu->logicore_reg_ba);
+ }
}
xvcu->aclk = devm_clk_get(&pdev->dev, "aclk");
@@ -560,7 +560,7 @@ static int xvcu_probe(struct platform_device *pdev)
* Bit 0 : Gasket isolation
* Bit 1 : put VCU out of reset
*/
- xvcu_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, VCU_GASKET_VALUE);
+ regmap_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, VCU_GASKET_VALUE);
/* Do the PLL Settings based on the ref clk,core and mcu clk freq */
ret = xvcu_set_pll(xvcu);
@@ -571,8 +571,6 @@ static int xvcu_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, xvcu);
- dev_info(&pdev->dev, "%s: Probed successfully\n", __func__);
-
return 0;
error_pll_ref:
@@ -599,7 +597,7 @@ static int xvcu_remove(struct platform_device *pdev)
return -ENODEV;
/* Add the Gasket isolation and put the VCU in reset. */
- xvcu_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, 0);
+ regmap_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, 0);
clk_disable_unprepare(xvcu->pll_ref);
clk_disable_unprepare(xvcu->aclk);
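Once the logicore registers sit behind a regmap (either the
"xlnx,vcu-settings" syscon or the MMIO fallback created in the probe
hunk), every access goes through regmap_read()/regmap_write() instead of
the removed xvcu_read()/xvcu_write() helpers. A hedged sketch, assuming
the VCU_CORE_CLK offset is now provided by <linux/mfd/syscon/xlnx-vcu.h>
and that the register holds the rate in MHz, as the
xvcu_set_vcu_pll_info() hunk above implies:

	static int example_read_core_clk_hz(struct regmap *map, u32 *hz)
	{
		u32 mhz;
		int ret;

		ret = regmap_read(map, VCU_CORE_CLK, &mhz);
		if (ret)
			return ret;

		*hz = mhz * 1000000;
		return 0;
	}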
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index 18d54f9fd715..ddad5d274ee8 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -588,7 +588,6 @@ static const struct cedrus_variant sun50i_h6_cedrus_variant = {
CEDRUS_CAPABILITY_H264_DEC |
CEDRUS_CAPABILITY_H265_DEC |
CEDRUS_CAPABILITY_VP8_DEC,
- .quirks = CEDRUS_QUIRK_NO_DMA_OFFSET,
.mod_rate = 600000000,
};
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
index e61c41853ba2..c96077aaef49 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -33,8 +33,6 @@
#define CEDRUS_CAPABILITY_MPEG2_DEC BIT(3)
#define CEDRUS_CAPABILITY_VP8_DEC BIT(4)
-#define CEDRUS_QUIRK_NO_DMA_OFFSET BIT(0)
-
enum cedrus_codec {
CEDRUS_CODEC_MPEG2,
CEDRUS_CODEC_H264,
@@ -168,7 +166,6 @@ struct cedrus_dec_ops {
struct cedrus_variant {
unsigned int capabilities;
- unsigned int quirks;
unsigned int mod_rate;
};
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
index 111cb91f8fc2..e2f2ff609c7e 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
@@ -224,24 +224,6 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
return ret;
}
- /*
- * The VPU is only able to handle bus addresses so we have to subtract
- * the RAM offset to the physcal addresses.
- *
- * This information will eventually be obtained from device-tree.
- *
- * XXX(hch): this has no business in a driver and needs to move
- * to the device tree.
- */
-
-#ifdef PHYS_PFN_OFFSET
- if (!(variant->quirks & CEDRUS_QUIRK_NO_DMA_OFFSET)) {
- ret = dma_direct_set_offset(dev->dev, PHYS_OFFSET, 0, SZ_4G);
- if (ret)
- return ret;
- }
-#endif
-
ret = of_reserved_mem_device_init(dev->dev);
if (ret && ret != -ENODEV) {
dev_err(dev->dev, "Failed to reserve memory\n");
diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
index 7a897d51969f..ec1d24693eba 100644
--- a/drivers/tee/optee/device.c
+++ b/drivers/tee/optee/device.c
@@ -98,7 +98,7 @@ static int __optee_enumerate_devices(u32 func)
return -ENODEV;
/* Open session with device enumeration pseudo TA */
- memcpy(sess_arg.uuid, pta_uuid.b, TEE_IOCTL_UUID_LEN);
+ export_uuid(sess_arg.uuid, &pta_uuid);
sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
sess_arg.num_params = 0;