author		Linus Torvalds <torvalds@linux-foundation.org>	2022-05-26 10:32:47 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-05-26 10:32:47 -0700
commit		cc3c470ae4ad758b8ddad825ab199f7eaa8b0a9e (patch)
tree		779e3dc27a1f4d1468bad2ea5cbfca28c64cf1cd /drivers
parent		ae862183285cbb2ef9032770d98ffa9becffe9d5 (diff)
parent		d4a3b442335b0a9476248c5d6dc07f6f8580a9ca (diff)
Merge tag 'arm-drivers-5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc
Pull ARM driver updates from Arnd Bergmann:
 "There are minor updates to SoC specific drivers for chips by
  Rockchip, Samsung, NVIDIA, TI, NXP, i.MX, Qualcomm, and Broadcom.

  Noteworthy driver changes include:

   - Several conversions of DT bindings to yaml format.

   - Renesas adds driver support for R-Car V4H, RZ/V2M and RZ/G2UL
     SoCs.

   - Qualcomm adds a bus driver for the SSC (Snapdragon Sensor Core),
     and support for more chips in the RPMh power domains and the
     soc-id.

   - NXP has a new driver for the HDMI blk-ctrl on i.MX8MP.

   - Apple M1 gains support for the on-chip NVMe controller, making it
     possible to finally use the internal disks. This also includes SoC
     drivers for their RTKit IPC and for the SART DMA address filter.

  For other subsystems that merge their drivers through the SoC tree,
  we have:

   - Firmware drivers for the ARM firmware stack including TEE, OP-TEE,
     SCMI and FF-A get a number of smaller updates and cleanups. OP-TEE
     now has a cache for firmware argument structures as an
     optimization, and SCMI now supports the 3.1 version of the
     specification.

   - Reset controller updates to Amlogic, ASpeed, Renesas and ACPI
     drivers.

   - Memory controller updates for Tegra, and a few updates for other
     platforms."

* tag 'arm-drivers-5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (159 commits)
  memory: tegra: Add MC error logging on Tegra186 onward
  memory: tegra: Add memory controller channels support
  memory: tegra: Add APE memory clients for Tegra234
  memory: tegra: Add Tegra234 support
  nvme-apple: fix sparse endianess warnings
  soc/tegra: pmc: Document core domain fields
  soc: qcom: pdr: use static for servreg_* variables
  soc: imx: fix semicolon.cocci warnings
  soc: renesas: R-Car V3U is R-Car Gen4
  soc: imx: add i.MX8MP HDMI blk-ctrl
  soc: imx: imx8m-blk-ctrl: Add i.MX8MP media blk-ctrl
  soc: imx: add i.MX8MP HSIO blk-ctrl
  soc: imx: imx8m-blk-ctrl: set power device name
  soc: qcom: llcc: Add sc8180x and sc8280xp configurations
  dt-bindings: arm: msm: Add sc8180x and sc8280xp LLCC compatibles
  soc/tegra: pmc: Select REGMAP
  dt-bindings: reset: st,sti-powerdown: Convert to yaml
  dt-bindings: reset: st,sti-picophyreset: Convert to yaml
  dt-bindings: reset: socfpga: Convert to yaml
  dt-bindings: reset: snps,axs10x-reset: Convert to yaml
  ...
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/bus/Kconfig	11
-rw-r--r--	drivers/bus/Makefile	1
-rw-r--r--	drivers/bus/brcmstb_gisb.c	1
-rw-r--r--	drivers/bus/qcom-ssc-block-bus.c	389
-rw-r--r--	drivers/bus/ti-sysc.c	4
-rw-r--r--	drivers/firmware/arm_ffa/driver.c	24
-rw-r--r--	drivers/firmware/arm_scmi/Kconfig	1
-rw-r--r--	drivers/firmware/arm_scmi/base.c	46
-rw-r--r--	drivers/firmware/arm_scmi/clock.c	343
-rw-r--r--	drivers/firmware/arm_scmi/common.h	225
-rw-r--r--	drivers/firmware/arm_scmi/driver.c	168
-rw-r--r--	drivers/firmware/arm_scmi/optee.c	144
-rw-r--r--	drivers/firmware/arm_scmi/perf.c	162
-rw-r--r--	drivers/firmware/arm_scmi/power.c	44
-rw-r--r--	drivers/firmware/arm_scmi/protocols.h	318
-rw-r--r--	drivers/firmware/arm_scmi/reset.c	40
-rw-r--r--	drivers/firmware/arm_scmi/sensors.c	645
-rw-r--r--	drivers/firmware/arm_scmi/system.c	9
-rw-r--r--	drivers/firmware/arm_scmi/voltage.c	218
-rw-r--r--	drivers/firmware/qcom_scm.c	4
-rw-r--r--	drivers/firmware/ti_sci.c	61
-rw-r--r--	drivers/memory/Kconfig	2
-rw-r--r--	drivers/memory/brcmstb_dpfe.c	10
-rw-r--r--	drivers/memory/da8xx-ddrctl.c	3
-rw-r--r--	drivers/memory/emif.c	15
-rw-r--r--	drivers/memory/fsl-corenet-cf.c	9
-rw-r--r--	drivers/memory/omap-gpmc.c	43
-rw-r--r--	drivers/memory/renesas-rpc-if.c	31
-rw-r--r--	drivers/memory/samsung/exynos5422-dmc.c	5
-rw-r--r--	drivers/memory/tegra/Makefile	2
-rw-r--r--	drivers/memory/tegra/mc.c	141
-rw-r--r--	drivers/memory/tegra/mc.h	50
-rw-r--r--	drivers/memory/tegra/tegra186-emc.c	3
-rw-r--r--	drivers/memory/tegra/tegra186.c	39
-rw-r--r--	drivers/memory/tegra/tegra194.c	9
-rw-r--r--	drivers/memory/tegra/tegra234.c	110
-rw-r--r--	drivers/memory/ti-aemif.c	4
-rw-r--r--	drivers/memory/ti-emif-pm.c	6
-rw-r--r--	drivers/nvme/host/Kconfig	13
-rw-r--r--	drivers/nvme/host/Makefile	3
-rw-r--r--	drivers/nvme/host/apple.c	1593
-rw-r--r--	drivers/reset/Kconfig	4
-rw-r--r--	drivers/reset/core.c	15
-rw-r--r--	drivers/reset/reset-meson.c	6
-rw-r--r--	drivers/reset/reset-simple.c	1
-rw-r--r--	drivers/reset/reset-uniphier-glue.c	75
-rw-r--r--	drivers/soc/Makefile	4
-rw-r--r--	drivers/soc/apple/Kconfig	24
-rw-r--r--	drivers/soc/apple/Makefile	6
-rw-r--r--	drivers/soc/apple/rtkit-crashlog.c	154
-rw-r--r--	drivers/soc/apple/rtkit-internal.h	62
-rw-r--r--	drivers/soc/apple/rtkit.c	958
-rw-r--r--	drivers/soc/apple/sart.c	328
-rw-r--r--	drivers/soc/bcm/bcm63xx/bcm-pmb.c	3
-rw-r--r--	drivers/soc/imx/Makefile	1
-rw-r--r--	drivers/soc/imx/gpcv2.c	430
-rw-r--r--	drivers/soc/imx/imx8m-blk-ctrl.c	124
-rw-r--r--	drivers/soc/imx/imx8mp-blk-ctrl.c	696
-rw-r--r--	drivers/soc/qcom/llcc-qcom.c	61
-rw-r--r--	drivers/soc/qcom/pdr_interface.c	11
-rw-r--r--	drivers/soc/qcom/pdr_internal.h	20
-rw-r--r--	drivers/soc/qcom/rpmhpd.c	73
-rw-r--r--	drivers/soc/qcom/smem.c	305
-rw-r--r--	drivers/soc/qcom/smp2p.c	1
-rw-r--r--	drivers/soc/qcom/smsm.c	1
-rw-r--r--	drivers/soc/qcom/socinfo.c	26
-rw-r--r--	drivers/soc/renesas/Kconfig	26
-rw-r--r--	drivers/soc/renesas/Makefile	1
-rw-r--r--	drivers/soc/renesas/r8a779g0-sysc.c	62
-rw-r--r--	drivers/soc/renesas/rcar-gen4-sysc.c	3
-rw-r--r--	drivers/soc/renesas/rcar-gen4-sysc.h	1
-rw-r--r--	drivers/soc/renesas/rcar-rst.c	1
-rw-r--r--	drivers/soc/renesas/renesas-soc.c	23
-rw-r--r--	drivers/soc/rockchip/Kconfig	24
-rw-r--r--	drivers/soc/rockchip/grf.c	17
-rw-r--r--	drivers/soc/rockchip/pm_domains.c	10
-rw-r--r--	drivers/soc/tegra/Kconfig	1
-rw-r--r--	drivers/soc/tegra/fuse/fuse-tegra.c	8
-rw-r--r--	drivers/soc/tegra/fuse/fuse-tegra30.c	17
-rw-r--r--	drivers/soc/tegra/pmc.c	35
-rw-r--r--	drivers/soc/ti/knav_dma.c	29
-rw-r--r--	drivers/soc/ti/knav_qmss_queue.c	21
-rw-r--r--	drivers/soc/ti/omap_prm.c	7
-rw-r--r--	drivers/soc/ti/pm33xx.c	6
-rw-r--r--	drivers/soc/ti/pruss.c	3
-rw-r--r--	drivers/soc/ti/ti_sci_pm_domains.c	2
-rw-r--r--	drivers/soc/ti/wkup_m3_ipc.c	210
-rw-r--r--	drivers/tee/Kconfig	5
-rw-r--r--	drivers/tee/optee/call.c	240
-rw-r--r--	drivers/tee/optee/core.c	1
-rw-r--r--	drivers/tee/optee/ffa_abi.c	38
-rw-r--r--	drivers/tee/optee/optee_ffa.h	12
-rw-r--r--	drivers/tee/optee/optee_private.h	31
-rw-r--r--	drivers/tee/optee/optee_smc.h	48
-rw-r--r--	drivers/tee/optee/smc_abi.c	197
-rw-r--r--	drivers/tee/tee_core.c	2
-rw-r--r--	drivers/tee/tee_shm.c	85
97 files changed, 8198 insertions, 1306 deletions
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 3c68e174a113..7bfe998f3514 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -152,6 +152,17 @@ config QCOM_EBI2
Interface 2, which can be used to connect things like NAND Flash,
SRAM, ethernet adapters, FPGAs and LCD displays.
+config QCOM_SSC_BLOCK_BUS
+ bool "Qualcomm SSC Block Bus Init Driver"
+ depends on ARCH_QCOM
+ help
+ Say y here to enable support for initializing the bus that connects
+ the SSC block's internal bus to the cNoC (configuration NoC) on
+ (some) qcom SoCs.
+ The SSC (Snapdragon Sensor Core) block contains a gpio controller,
+ i2c/spi/uart controllers, a hexagon core, and a clock controller
+ which provides clocks for the above.
+
config SUN50I_DE2_BUS
bool "Allwinner A64 DE2 Bus Driver"
default ARM64
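
For reference, the new option is a plain bool gated on ARCH_QCOM, so a config
fragment enabling it (a minimal sketch; any ARCH_QCOM platform config works as
a base) is just:

	CONFIG_ARCH_QCOM=y
	CONFIG_QCOM_SSC_BLOCK_BUS=y
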
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 16da51130d1a..d90eed189a65 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o
obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
obj-$(CONFIG_QCOM_EBI2) += qcom-ebi2.o
+obj-$(CONFIG_QCOM_SSC_BLOCK_BUS) += qcom-ssc-block-bus.o
obj-$(CONFIG_SUN50I_DE2_BUS) += sun50i-de2.o
obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o
obj-$(CONFIG_OF) += simple-pm-bus.o
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index 183d5cc37d42..b0c3704777e9 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -536,7 +536,6 @@ static struct platform_driver brcmstb_gisb_arb_driver = {
.name = "brcm-gisb-arb",
.of_match_table = brcmstb_gisb_arb_of_match,
.pm = &brcmstb_gisb_arb_pm_ops,
- .suppress_bind_attrs = true,
},
};
diff --git a/drivers/bus/qcom-ssc-block-bus.c b/drivers/bus/qcom-ssc-block-bus.c
new file mode 100644
index 000000000000..eedeb29a5ff3
--- /dev/null
+++ b/drivers/bus/qcom-ssc-block-bus.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2021, Michael Srba
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+/* AXI Halt Register Offsets */
+#define AXI_HALTREQ_REG 0x0
+#define AXI_HALTACK_REG 0x4
+#define AXI_IDLE_REG 0x8
+
+#define SSCAON_CONFIG0_CLAMP_EN_OVRD BIT(4)
+#define SSCAON_CONFIG0_CLAMP_EN_OVRD_VAL BIT(5)
+
+static const char *const qcom_ssc_block_pd_names[] = {
+ "ssc_cx",
+ "ssc_mx"
+};
+
+struct qcom_ssc_block_bus_data {
+ const char *const *pd_names;
+ struct device *pds[ARRAY_SIZE(qcom_ssc_block_pd_names)];
+ char __iomem *reg_mpm_sscaon_config0;
+ char __iomem *reg_mpm_sscaon_config1;
+ struct regmap *halt_map;
+ struct clk *xo_clk;
+ struct clk *aggre2_clk;
+ struct clk *gcc_im_sleep_clk;
+ struct clk *aggre2_north_clk;
+ struct clk *ssc_xo_clk;
+ struct clk *ssc_ahbs_clk;
+ struct reset_control *ssc_bcr;
+ struct reset_control *ssc_reset;
+ u32 ssc_axi_halt;
+ int num_pds;
+};
+
+static void reg32_set_bits(char __iomem *reg, u32 value)
+{
+ u32 tmp = ioread32(reg);
+
+ iowrite32(tmp | value, reg);
+}
+
+static void reg32_clear_bits(char __iomem *reg, u32 value)
+{
+ u32 tmp = ioread32(reg);
+
+ iowrite32(tmp & (~value), reg);
+}
+
+static int qcom_ssc_block_bus_init(struct device *dev)
+{
+ int ret;
+
+ struct qcom_ssc_block_bus_data *data = dev_get_drvdata(dev);
+
+ ret = clk_prepare_enable(data->xo_clk);
+ if (ret) {
+ dev_err(dev, "error enabling xo_clk: %d\n", ret);
+ goto err_xo_clk;
+ }
+
+ ret = clk_prepare_enable(data->aggre2_clk);
+ if (ret) {
+ dev_err(dev, "error enabling aggre2_clk: %d\n", ret);
+ goto err_aggre2_clk;
+ }
+
+ ret = clk_prepare_enable(data->gcc_im_sleep_clk);
+ if (ret) {
+ dev_err(dev, "error enabling gcc_im_sleep_clk: %d\n", ret);
+ goto err_gcc_im_sleep_clk;
+ }
+
+ /*
+ * We need to intervene here because the HW logic driving these signals cannot handle
+ * initialization after power collapse by itself.
+ */
+ reg32_clear_bits(data->reg_mpm_sscaon_config0,
+ SSCAON_CONFIG0_CLAMP_EN_OVRD | SSCAON_CONFIG0_CLAMP_EN_OVRD_VAL);
+ /* override few_ack/rest_ack */
+ reg32_clear_bits(data->reg_mpm_sscaon_config1, BIT(31));
+
+ ret = clk_prepare_enable(data->aggre2_north_clk);
+ if (ret) {
+ dev_err(dev, "error enabling aggre2_north_clk: %d\n", ret);
+ goto err_aggre2_north_clk;
+ }
+
+ ret = reset_control_deassert(data->ssc_reset);
+ if (ret) {
+ dev_err(dev, "error deasserting ssc_reset: %d\n", ret);
+ goto err_ssc_reset;
+ }
+
+ ret = reset_control_deassert(data->ssc_bcr);
+ if (ret) {
+ dev_err(dev, "error deasserting ssc_bcr: %d\n", ret);
+ goto err_ssc_bcr;
+ }
+
+ regmap_write(data->halt_map, data->ssc_axi_halt + AXI_HALTREQ_REG, 0);
+
+ ret = clk_prepare_enable(data->ssc_xo_clk);
+ if (ret) {
+ dev_err(dev, "error deasserting ssc_xo_clk: %d\n", ret);
+ goto err_ssc_xo_clk;
+ }
+
+ ret = clk_prepare_enable(data->ssc_ahbs_clk);
+ if (ret) {
+ dev_err(dev, "error deasserting ssc_ahbs_clk: %d\n", ret);
+ goto err_ssc_ahbs_clk;
+ }
+
+ return 0;
+
+err_ssc_ahbs_clk:
+ clk_disable(data->ssc_xo_clk);
+
+err_ssc_xo_clk:
+ regmap_write(data->halt_map, data->ssc_axi_halt + AXI_HALTREQ_REG, 1);
+
+ reset_control_assert(data->ssc_bcr);
+
+err_ssc_bcr:
+ reset_control_assert(data->ssc_reset);
+
+err_ssc_reset:
+ clk_disable(data->aggre2_north_clk);
+
+err_aggre2_north_clk:
+ reg32_set_bits(data->reg_mpm_sscaon_config0, BIT(4) | BIT(5));
+ reg32_set_bits(data->reg_mpm_sscaon_config1, BIT(31));
+
+ clk_disable(data->gcc_im_sleep_clk);
+
+err_gcc_im_sleep_clk:
+ clk_disable(data->aggre2_clk);
+
+err_aggre2_clk:
+ clk_disable(data->xo_clk);
+
+err_xo_clk:
+ return ret;
+}
+
+static void qcom_ssc_block_bus_deinit(struct device *dev)
+{
+ int ret;
+
+ struct qcom_ssc_block_bus_data *data = dev_get_drvdata(dev);
+
+ clk_disable(data->ssc_xo_clk);
+ clk_disable(data->ssc_ahbs_clk);
+
+ ret = reset_control_assert(data->ssc_bcr);
+ if (ret)
+ dev_err(dev, "error asserting ssc_bcr: %d\n", ret);
+
+ regmap_write(data->halt_map, data->ssc_axi_halt + AXI_HALTREQ_REG, 1);
+
+ reg32_set_bits(data->reg_mpm_sscaon_config1, BIT(31));
+ reg32_set_bits(data->reg_mpm_sscaon_config0, BIT(4) | BIT(5));
+
+ ret = reset_control_assert(data->ssc_reset);
+ if (ret)
+ dev_err(dev, "error asserting ssc_reset: %d\n", ret);
+
+ clk_disable(data->gcc_im_sleep_clk);
+
+ clk_disable(data->aggre2_north_clk);
+
+ clk_disable(data->aggre2_clk);
+ clk_disable(data->xo_clk);
+}
+
+static int qcom_ssc_block_bus_pds_attach(struct device *dev, struct device **pds,
+ const char *const *pd_names, size_t num_pds)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < num_pds; i++) {
+ pds[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
+ if (IS_ERR_OR_NULL(pds[i])) {
+ ret = PTR_ERR(pds[i]) ? : -ENODATA;
+ goto unroll_attach;
+ }
+ }
+
+ return num_pds;
+
+unroll_attach:
+ for (i--; i >= 0; i--)
+ dev_pm_domain_detach(pds[i], false);
+
+ return ret;
+}
+
+static void qcom_ssc_block_bus_pds_detach(struct device *dev, struct device **pds, size_t num_pds)
+{
+ int i;
+
+ for (i = 0; i < num_pds; i++)
+ dev_pm_domain_detach(pds[i], false);
+}
+
+static int qcom_ssc_block_bus_pds_enable(struct device **pds, size_t num_pds)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < num_pds; i++) {
+ dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
+ ret = pm_runtime_get_sync(pds[i]);
+ if (ret < 0)
+ goto unroll_pd_votes;
+ }
+
+ return 0;
+
+unroll_pd_votes:
+ for (i--; i >= 0; i--) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ pm_runtime_put(pds[i]);
+ }
+
+ return ret;
+}
+
+static void qcom_ssc_block_bus_pds_disable(struct device **pds, size_t num_pds)
+{
+ int i;
+
+ for (i = 0; i < num_pds; i++) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ pm_runtime_put(pds[i]);
+ }
+}
+
+static int qcom_ssc_block_bus_probe(struct platform_device *pdev)
+{
+ struct qcom_ssc_block_bus_data *data;
+ struct device_node *np = pdev->dev.of_node;
+ struct of_phandle_args halt_args;
+ struct resource *res;
+ int ret;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, data);
+
+ data->pd_names = qcom_ssc_block_pd_names;
+ data->num_pds = ARRAY_SIZE(qcom_ssc_block_pd_names);
+
+ /* power domains */
+ ret = qcom_ssc_block_bus_pds_attach(&pdev->dev, data->pds, data->pd_names, data->num_pds);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "error when attaching power domains\n");
+
+ ret = qcom_ssc_block_bus_pds_enable(data->pds, data->num_pds);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "error when enabling power domains\n");
+
+ /* low level overrides for when the HW logic doesn't "just work" */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpm_sscaon_config0");
+ data->reg_mpm_sscaon_config0 = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->reg_mpm_sscaon_config0))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->reg_mpm_sscaon_config0),
+ "Failed to ioremap mpm_sscaon_config0\n");
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpm_sscaon_config1");
+ data->reg_mpm_sscaon_config1 = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->reg_mpm_sscaon_config1))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->reg_mpm_sscaon_config1),
+ "Failed to ioremap mpm_sscaon_config1\n");
+
+ /* resets */
+ data->ssc_bcr = devm_reset_control_get_exclusive(&pdev->dev, "ssc_bcr");
+ if (IS_ERR(data->ssc_bcr))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->ssc_bcr),
+ "Failed to acquire reset: scc_bcr\n");
+
+ data->ssc_reset = devm_reset_control_get_exclusive(&pdev->dev, "ssc_reset");
+ if (IS_ERR(data->ssc_reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->ssc_reset),
+ "Failed to acquire reset: ssc_reset:\n");
+
+ /* clocks */
+ data->xo_clk = devm_clk_get(&pdev->dev, "xo");
+ if (IS_ERR(data->xo_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->xo_clk),
+ "Failed to get clock: xo\n");
+
+ data->aggre2_clk = devm_clk_get(&pdev->dev, "aggre2");
+ if (IS_ERR(data->aggre2_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->aggre2_clk),
+ "Failed to get clock: aggre2\n");
+
+ data->gcc_im_sleep_clk = devm_clk_get(&pdev->dev, "gcc_im_sleep");
+ if (IS_ERR(data->gcc_im_sleep_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->gcc_im_sleep_clk),
+ "Failed to get clock: gcc_im_sleep\n");
+
+ data->aggre2_north_clk = devm_clk_get(&pdev->dev, "aggre2_north");
+ if (IS_ERR(data->aggre2_north_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->aggre2_north_clk),
+ "Failed to get clock: aggre2_north\n");
+
+ data->ssc_xo_clk = devm_clk_get(&pdev->dev, "ssc_xo");
+ if (IS_ERR(data->ssc_xo_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->ssc_xo_clk),
+ "Failed to get clock: ssc_xo\n");
+
+ data->ssc_ahbs_clk = devm_clk_get(&pdev->dev, "ssc_ahbs");
+ if (IS_ERR(data->ssc_ahbs_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->ssc_ahbs_clk),
+ "Failed to get clock: ssc_ahbs\n");
+
+ ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, "qcom,halt-regs", 1, 0,
+ &halt_args);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Failed to parse qcom,halt-regs\n");
+
+ data->halt_map = syscon_node_to_regmap(halt_args.np);
+ of_node_put(halt_args.np);
+ if (IS_ERR(data->halt_map))
+ return PTR_ERR(data->halt_map);
+
+ data->ssc_axi_halt = halt_args.args[0];
+
+ qcom_ssc_block_bus_init(&pdev->dev);
+
+ of_platform_populate(np, NULL, NULL, &pdev->dev);
+
+ return 0;
+}
+
+static int qcom_ssc_block_bus_remove(struct platform_device *pdev)
+{
+ struct qcom_ssc_block_bus_data *data = platform_get_drvdata(pdev);
+
+ qcom_ssc_block_bus_deinit(&pdev->dev);
+
+ qcom_ssc_block_bus_pds_disable(data->pds, data->num_pds);
+ qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds);
+ pm_runtime_disable(&pdev->dev);
+ pm_clk_destroy(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_ssc_block_bus_of_match[] = {
+ { .compatible = "qcom,ssc-block-bus", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, qcom_ssc_block_bus_of_match);
+
+static struct platform_driver qcom_ssc_block_bus_driver = {
+ .probe = qcom_ssc_block_bus_probe,
+ .remove = qcom_ssc_block_bus_remove,
+ .driver = {
+ .name = "qcom-ssc-block-bus",
+ .of_match_table = qcom_ssc_block_bus_of_match,
+ },
+};
+
+module_platform_driver(qcom_ssc_block_bus_driver);
+
+MODULE_DESCRIPTION("A driver for handling the init sequence needed for accessing the SSC block on (some) qcom SoCs over AHB");
+MODULE_AUTHOR("Michael Srba <Michael.Srba@seznam.cz>");
+MODULE_LICENSE("GPL v2");
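
Since probe ends in of_platform_populate(), peripherals described as children
of the bus node in DT probe as ordinary platform devices once the bring-up
sequence above has run. A minimal sketch of such a child driver; the
"vendor,ssc-child" compatible and all names here are hypothetical, for
illustration only:

	#include <linux/module.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	static int ssc_child_probe(struct platform_device *pdev)
	{
		/*
		 * By the time this runs, qcom-ssc-block-bus has already
		 * de-asserted the SSC resets and enabled the bus clocks.
		 */
		dev_info(&pdev->dev, "SSC child device probed\n");
		return 0;
	}

	static const struct of_device_id ssc_child_of_match[] = {
		{ .compatible = "vendor,ssc-child" },	/* hypothetical */
		{ /* sentinel */ }
	};
	MODULE_DEVICE_TABLE(of, ssc_child_of_match);

	static struct platform_driver ssc_child_driver = {
		.probe = ssc_child_probe,
		.driver = {
			.name = "ssc-child",
			.of_match_table = ssc_child_of_match,
		},
	};
	module_platform_driver(ssc_child_driver);

	MODULE_LICENSE("GPL");
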
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 7a1b1f9e4933..18363aa2a49d 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -3049,7 +3049,7 @@ static const struct soc_device_attribute sysc_soc_match[] = {
SOC_FLAG("AM43*", SOC_AM4),
SOC_FLAG("DRA7*", SOC_DRA7),
- { /* sentinel */ },
+ { /* sentinel */ }
};
/*
@@ -3070,7 +3070,7 @@ static const struct soc_device_attribute sysc_soc_feat_match[] = {
SOC_FLAG("OMAP3615/AM3715", DIS_IVA),
SOC_FLAG("OMAP3621", DIS_ISP),
- { /* sentinel */ },
+ { /* sentinel */ }
};
static int sysc_add_disabled(unsigned long base)
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 14f900047ac0..ec731e9e942b 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -398,11 +398,15 @@ static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz,
if (ret.a0 == FFA_ERROR)
return ffa_to_linux_errno((int)ret.a2);
- if (ret.a0 != FFA_SUCCESS)
+ if (ret.a0 == FFA_SUCCESS) {
+ if (handle)
+ *handle = PACK_HANDLE(ret.a2, ret.a3);
+ } else if (ret.a0 == FFA_MEM_FRAG_RX) {
+ if (handle)
+ *handle = PACK_HANDLE(ret.a1, ret.a2);
+ } else {
return -EOPNOTSUPP;
-
- if (handle)
- *handle = PACK_HANDLE(ret.a2, ret.a3);
+ }
return frag_len;
}
@@ -426,10 +430,12 @@ static int ffa_mem_next_frag(u64 handle, u32 frag_len)
if (ret.a0 == FFA_ERROR)
return ffa_to_linux_errno((int)ret.a2);
- if (ret.a0 != FFA_MEM_FRAG_RX)
- return -EOPNOTSUPP;
+ if (ret.a0 == FFA_MEM_FRAG_RX)
+ return ret.a3;
+ else if (ret.a0 == FFA_SUCCESS)
+ return 0;
- return ret.a3;
+ return -EOPNOTSUPP;
}
static int
@@ -582,7 +588,7 @@ static int ffa_partition_info_get(const char *uuid_str,
return -ENODEV;
}
- count = ffa_partition_probe(&uuid_null, &pbuf);
+ count = ffa_partition_probe(&uuid, &pbuf);
if (count <= 0)
return -ENOENT;
@@ -688,8 +694,6 @@ static void ffa_setup_partitions(void)
__func__, tpbuf->id);
continue;
}
-
- ffa_dev_set_drvdata(ffa_dev, drv_info);
}
kfree(pbuf);
}
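
The two reworked helpers implement the FF-A fragmented memory-share handshake:
the first fragment can either complete the operation outright (FFA_SUCCESS) or
ask for more data (FFA_MEM_FRAG_RX), and every later fragment repeats that
choice, with ffa_mem_next_frag() returning 0 once the receiver has everything.
A simplified sketch of a transmit loop built on the helpers above (a sketch
only: error paths and descriptor construction are trimmed, and the buffer
refill is indicated just by a comment):

	static int ffa_send_all_frags(u32 func_id, phys_addr_t buf, u32 buf_sz,
				      u32 first_len, u32 total_len)
	{
		u64 handle;
		u32 sent;
		int rc;

		/* First fragment also establishes the memory-share handle */
		rc = ffa_mem_first_frag(func_id, buf, buf_sz, first_len,
					total_len, &handle);
		if (rc < 0)
			return rc;

		for (sent = first_len; sent < total_len; ) {
			u32 chunk = min(total_len - sent, buf_sz);

			/* refill the shared TX buffer with the next @chunk bytes */
			rc = ffa_mem_next_frag(handle, chunk);
			if (rc < 0)
				return rc;
			sent += chunk;
			if (!rc)	/* FFA_SUCCESS: full descriptor received */
				break;
		}

		return 0;
	}
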
diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig
index 7794bd41eaa0..1e7b7fec97d9 100644
--- a/drivers/firmware/arm_scmi/Kconfig
+++ b/drivers/firmware/arm_scmi/Kconfig
@@ -59,6 +59,7 @@ config ARM_SCMI_TRANSPORT_OPTEE
depends on OPTEE=y || OPTEE=ARM_SCMI_PROTOCOL
select ARM_SCMI_HAVE_TRANSPORT
select ARM_SCMI_HAVE_SHMEM
+ select ARM_SCMI_HAVE_MSG
default y
help
This enables the OP-TEE service based transport for SCMI.
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
index f5219334fd3a..20fba7370f4e 100644
--- a/drivers/firmware/arm_scmi/base.c
+++ b/drivers/firmware/arm_scmi/base.c
@@ -178,6 +178,7 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph,
__le32 *num_skip, *num_ret;
u32 tot_num_ret = 0, loop_num_ret;
struct device *dev = ph->dev;
+ struct scmi_revision_info *rev = ph->get_priv(ph);
ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_LIST_PROTOCOLS,
sizeof(*num_skip), 0, &t);
@@ -189,6 +190,9 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph,
list = t->rx.buf + sizeof(*num_ret);
do {
+ size_t real_list_sz;
+ u32 calc_list_sz;
+
/* Set the number of protocols to be skipped/already read */
*num_skip = cpu_to_le32(tot_num_ret);
@@ -197,8 +201,30 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph,
break;
loop_num_ret = le32_to_cpu(*num_ret);
- if (tot_num_ret + loop_num_ret > MAX_PROTOCOLS_IMP) {
- dev_err(dev, "No. of Protocol > MAX_PROTOCOLS_IMP");
+ if (!loop_num_ret)
+ break;
+
+ if (loop_num_ret > rev->num_protocols - tot_num_ret) {
+ dev_err(dev,
+ "No. Returned protocols > Total protocols.\n");
+ break;
+ }
+
+ if (t->rx.len < (sizeof(u32) * 2)) {
+ dev_err(dev, "Truncated reply - rx.len:%zd\n",
+ t->rx.len);
+ ret = -EPROTO;
+ break;
+ }
+
+ real_list_sz = t->rx.len - sizeof(u32);
+ calc_list_sz = (1 + (loop_num_ret - 1) / sizeof(u32)) *
+ sizeof(u32);
+ if (calc_list_sz != real_list_sz) {
+ dev_err(dev,
+ "Malformed reply - real_sz:%zd calc_sz:%u\n",
+ real_list_sz, calc_list_sz);
+ ret = -EPROTO;
break;
}
@@ -208,7 +234,7 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph,
tot_num_ret += loop_num_ret;
ph->xops->reset_rx_to_maxsz(ph, t);
- } while (loop_num_ret);
+ } while (tot_num_ret < rev->num_protocols);
ph->xops->xfer_put(ph, t);
@@ -351,15 +377,19 @@ static int scmi_base_protocol_init(const struct scmi_protocol_handle *ph)
if (ret)
return ret;
- prot_imp = devm_kcalloc(dev, MAX_PROTOCOLS_IMP, sizeof(u8), GFP_KERNEL);
- if (!prot_imp)
- return -ENOMEM;
-
rev->major_ver = PROTOCOL_REV_MAJOR(version);
rev->minor_ver = PROTOCOL_REV_MINOR(version);
ph->set_priv(ph, rev);
- scmi_base_attributes_get(ph);
+ ret = scmi_base_attributes_get(ph);
+ if (ret)
+ return ret;
+
+ prot_imp = devm_kcalloc(dev, rev->num_protocols, sizeof(u8),
+ GFP_KERNEL);
+ if (!prot_imp)
+ return -ENOMEM;
+
scmi_base_vendor_id_get(ph, false);
scmi_base_vendor_id_get(ph, true);
scmi_base_implementation_version_get(ph);
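
The new length check works because BASE_DISCOVER_LIST_PROTOCOLS packs one
implemented-protocol identifier per byte, so a reply carrying loop_num_ret
entries must occupy exactly that many bytes rounded up to u32 granularity
(real_list_sz already excludes the leading num_returned word). Worked through
the formula above:

	loop_num_ret = 5:  (1 + (5 - 1) / 4) * 4 =  8 bytes
	loop_num_ret = 8:  (1 + (8 - 1) / 4) * 4 =  8 bytes
	loop_num_ret = 9:  (1 + (9 - 1) / 4) * 4 = 12 bytes
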
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 45600acc0f45..4d36a9a133d1 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -2,13 +2,15 @@
/*
* System Control and Management Interface (SCMI) Clock Protocol
*
- * Copyright (C) 2018-2021 ARM Ltd.
+ * Copyright (C) 2018-2022 ARM Ltd.
*/
#include <linux/module.h>
+#include <linux/limits.h>
#include <linux/sort.h>
-#include "common.h"
+#include "protocols.h"
+#include "notify.h"
enum scmi_clock_protocol_cmd {
CLOCK_ATTRIBUTES = 0x3,
@@ -16,6 +18,9 @@ enum scmi_clock_protocol_cmd {
CLOCK_RATE_SET = 0x5,
CLOCK_RATE_GET = 0x6,
CLOCK_CONFIG_SET = 0x7,
+ CLOCK_NAME_GET = 0x8,
+ CLOCK_RATE_NOTIFY = 0x9,
+ CLOCK_RATE_CHANGE_REQUESTED_NOTIFY = 0xA,
};
struct scmi_msg_resp_clock_protocol_attributes {
@@ -27,7 +32,10 @@ struct scmi_msg_resp_clock_protocol_attributes {
struct scmi_msg_resp_clock_attributes {
__le32 attributes;
#define CLOCK_ENABLE BIT(0)
- u8 name[SCMI_MAX_STR_SIZE];
+#define SUPPORTS_RATE_CHANGED_NOTIF(x) ((x) & BIT(31))
+#define SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(x) ((x) & BIT(30))
+#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(29))
+ u8 name[SCMI_SHORT_NAME_MAX_SIZE];
__le32 clock_enable_latency;
};
@@ -68,6 +76,24 @@ struct scmi_clock_set_rate {
__le32 value_high;
};
+struct scmi_msg_resp_set_rate_complete {
+ __le32 id;
+ __le32 rate_low;
+ __le32 rate_high;
+};
+
+struct scmi_msg_clock_rate_notify {
+ __le32 clk_id;
+ __le32 notify_enable;
+};
+
+struct scmi_clock_rate_notify_payld {
+ __le32 agent_id;
+ __le32 clock_id;
+ __le32 rate_low;
+ __le32 rate_high;
+};
+
struct clock_info {
u32 version;
int num_clocks;
@@ -76,6 +102,11 @@ struct clock_info {
struct scmi_clock_info *clk;
};
+static enum scmi_clock_protocol_cmd evt_2_cmd[] = {
+ CLOCK_RATE_NOTIFY,
+ CLOCK_RATE_CHANGE_REQUESTED_NOTIFY,
+};
+
static int
scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
struct clock_info *ci)
@@ -102,9 +133,11 @@ scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
}
static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
- u32 clk_id, struct scmi_clock_info *clk)
+ u32 clk_id, struct scmi_clock_info *clk,
+ u32 version)
{
int ret;
+ u32 attributes;
struct scmi_xfer *t;
struct scmi_msg_resp_clock_attributes *attr;
@@ -118,16 +151,33 @@ static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
+ u32 latency = 0;
+ attributes = le32_to_cpu(attr->attributes);
strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
- /* Is optional field clock_enable_latency provided ? */
- if (t->rx.len == sizeof(*attr))
- clk->enable_latency =
- le32_to_cpu(attr->clock_enable_latency);
- } else {
- clk->name[0] = '\0';
+ /* clock_enable_latency field is present only since SCMI v3.1 */
+ if (PROTOCOL_REV_MAJOR(version) >= 0x2)
+ latency = le32_to_cpu(attr->clock_enable_latency);
+ clk->enable_latency = latency ? : U32_MAX;
}
ph->xops->xfer_put(ph, t);
+
+ /*
+ * If supported overwrite short name with the extended one;
+ * on error just carry on and use already provided short name.
+ */
+ if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x2) {
+ if (SUPPORTS_EXTENDED_NAMES(attributes))
+ ph->hops->extended_name_get(ph, CLOCK_NAME_GET, clk_id,
+ clk->name,
+ SCMI_MAX_STR_SIZE);
+
+ if (SUPPORTS_RATE_CHANGED_NOTIF(attributes))
+ clk->rate_changed_notifications = true;
+ if (SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes))
+ clk->rate_change_requested_notifications = true;
+ }
+
return ret;
}
@@ -143,81 +193,111 @@ static int rate_cmp_func(const void *_r1, const void *_r2)
return 1;
}
-static int
-scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
- struct scmi_clock_info *clk)
-{
- u64 *rate = NULL;
- int ret, cnt;
- bool rate_discrete = false;
- u32 tot_rate_cnt = 0, rates_flag;
- u16 num_returned, num_remaining;
- struct scmi_xfer *t;
- struct scmi_msg_clock_describe_rates *clk_desc;
- struct scmi_msg_resp_clock_describe_rates *rlist;
+struct scmi_clk_ipriv {
+ u32 clk_id;
+ struct scmi_clock_info *clk;
+};
- ret = ph->xops->xfer_get_init(ph, CLOCK_DESCRIBE_RATES,
- sizeof(*clk_desc), 0, &t);
- if (ret)
- return ret;
+static void iter_clk_describe_prepare_message(void *message,
+ const unsigned int desc_index,
+ const void *priv)
+{
+ struct scmi_msg_clock_describe_rates *msg = message;
+ const struct scmi_clk_ipriv *p = priv;
- clk_desc = t->tx.buf;
- rlist = t->rx.buf;
+ msg->id = cpu_to_le32(p->clk_id);
+ /* Set the number of rates to be skipped/already read */
+ msg->rate_index = cpu_to_le32(desc_index);
+}
- do {
- clk_desc->id = cpu_to_le32(clk_id);
- /* Set the number of rates to be skipped/already read */
- clk_desc->rate_index = cpu_to_le32(tot_rate_cnt);
+static int
+iter_clk_describe_update_state(struct scmi_iterator_state *st,
+ const void *response, void *priv)
+{
+ u32 flags;
+ struct scmi_clk_ipriv *p = priv;
+ const struct scmi_msg_resp_clock_describe_rates *r = response;
- ret = ph->xops->do_xfer(ph, t);
- if (ret)
- goto err;
+ flags = le32_to_cpu(r->num_rates_flags);
+ st->num_remaining = NUM_REMAINING(flags);
+ st->num_returned = NUM_RETURNED(flags);
+ p->clk->rate_discrete = RATE_DISCRETE(flags);
- rates_flag = le32_to_cpu(rlist->num_rates_flags);
- num_remaining = NUM_REMAINING(rates_flag);
- rate_discrete = RATE_DISCRETE(rates_flag);
- num_returned = NUM_RETURNED(rates_flag);
+ return 0;
+}
- if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) {
- dev_err(ph->dev, "No. of rates > MAX_NUM_RATES");
+static int
+iter_clk_describe_process_response(const struct scmi_protocol_handle *ph,
+ const void *response,
+ struct scmi_iterator_state *st, void *priv)
+{
+ int ret = 0;
+ struct scmi_clk_ipriv *p = priv;
+ const struct scmi_msg_resp_clock_describe_rates *r = response;
+
+ if (!p->clk->rate_discrete) {
+ switch (st->desc_index + st->loop_idx) {
+ case 0:
+ p->clk->range.min_rate = RATE_TO_U64(r->rate[0]);
break;
- }
-
- if (!rate_discrete) {
- clk->range.min_rate = RATE_TO_U64(rlist->rate[0]);
- clk->range.max_rate = RATE_TO_U64(rlist->rate[1]);
- clk->range.step_size = RATE_TO_U64(rlist->rate[2]);
- dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
- clk->range.min_rate, clk->range.max_rate,
- clk->range.step_size);
+ case 1:
+ p->clk->range.max_rate = RATE_TO_U64(r->rate[1]);
+ break;
+ case 2:
+ p->clk->range.step_size = RATE_TO_U64(r->rate[2]);
+ break;
+ default:
+ ret = -EINVAL;
break;
}
+ } else {
+ u64 *rate = &p->clk->list.rates[st->desc_index + st->loop_idx];
- rate = &clk->list.rates[tot_rate_cnt];
- for (cnt = 0; cnt < num_returned; cnt++, rate++) {
- *rate = RATE_TO_U64(rlist->rate[cnt]);
- dev_dbg(ph->dev, "Rate %llu Hz\n", *rate);
- }
+ *rate = RATE_TO_U64(r->rate[st->loop_idx]);
+ p->clk->list.num_rates++;
+ //XXX dev_dbg(ph->dev, "Rate %llu Hz\n", *rate);
+ }
- tot_rate_cnt += num_returned;
+ return ret;
+}
+
+static int
+scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
+ struct scmi_clock_info *clk)
+{
+ int ret;
- ph->xops->reset_rx_to_maxsz(ph, t);
- /*
- * check for both returned and remaining to avoid infinite
- * loop due to buggy firmware
- */
- } while (num_returned && num_remaining);
+ void *iter;
+ struct scmi_msg_clock_describe_rates *msg;
+ struct scmi_iterator_ops ops = {
+ .prepare_message = iter_clk_describe_prepare_message,
+ .update_state = iter_clk_describe_update_state,
+ .process_response = iter_clk_describe_process_response,
+ };
+ struct scmi_clk_ipriv cpriv = {
+ .clk_id = clk_id,
+ .clk = clk,
+ };
+
+ iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES,
+ CLOCK_DESCRIBE_RATES,
+ sizeof(*msg), &cpriv);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ ret = ph->hops->iter_response_run(iter);
+ if (ret)
+ return ret;
- if (rate_discrete && rate) {
- clk->list.num_rates = tot_rate_cnt;
- sort(clk->list.rates, tot_rate_cnt, sizeof(*rate),
- rate_cmp_func, NULL);
+ if (!clk->rate_discrete) {
+ dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
+ clk->range.min_rate, clk->range.max_rate,
+ clk->range.step_size);
+ } else if (clk->list.num_rates) {
+ sort(clk->list.rates, clk->list.num_rates,
+ sizeof(clk->list.rates[0]), rate_cmp_func, NULL);
}
- clk->rate_discrete = rate_discrete;
-
-err:
- ph->xops->xfer_put(ph, t);
return ret;
}
@@ -266,10 +346,22 @@ static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
cfg->value_low = cpu_to_le32(rate & 0xffffffff);
cfg->value_high = cpu_to_le32(rate >> 32);
- if (flags & CLOCK_SET_ASYNC)
+ if (flags & CLOCK_SET_ASYNC) {
ret = ph->xops->do_xfer_with_response(ph, t);
- else
+ if (!ret) {
+ struct scmi_msg_resp_set_rate_complete *resp;
+
+ resp = t->rx.buf;
+ if (le32_to_cpu(resp->id) == clk_id)
+ dev_dbg(ph->dev,
+ "Clk ID %d set async to %llu\n", clk_id,
+ get_unaligned_le64(&resp->rate_low));
+ else
+ ret = -EPROTO;
+ }
+ } else {
ret = ph->xops->do_xfer(ph, t);
+ }
if (ci->max_async_req)
atomic_dec(&ci->cur_async_req);
@@ -355,13 +447,111 @@ static const struct scmi_clk_proto_ops clk_proto_ops = {
.disable_atomic = scmi_clock_disable_atomic,
};
+static int scmi_clk_rate_notify(const struct scmi_protocol_handle *ph,
+ u32 clk_id, int message_id, bool enable)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_clock_rate_notify *notify;
+
+ ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->clk_id = cpu_to_le32(clk_id);
+ notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_clk_set_notify_enabled(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret, cmd_id;
+
+ if (evt_id >= ARRAY_SIZE(evt_2_cmd))
+ return -EINVAL;
+
+ cmd_id = evt_2_cmd[evt_id];
+ ret = scmi_clk_rate_notify(ph, src_id, cmd_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+
+ return ret;
+}
+
+static void *scmi_clk_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ const struct scmi_clock_rate_notify_payld *p = payld;
+ struct scmi_clock_rate_notif_report *r = report;
+
+ if (sizeof(*p) != payld_sz ||
+ (evt_id != SCMI_EVENT_CLOCK_RATE_CHANGED &&
+ evt_id != SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED))
+ return NULL;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->clock_id = le32_to_cpu(p->clock_id);
+ r->rate = get_unaligned_le64(&p->rate_low);
+ *src_id = r->clock_id;
+
+ return r;
+}
+
+static int scmi_clk_get_num_sources(const struct scmi_protocol_handle *ph)
+{
+ struct clock_info *ci = ph->get_priv(ph);
+
+ if (!ci)
+ return -EINVAL;
+
+ return ci->num_clocks;
+}
+
+static const struct scmi_event clk_events[] = {
+ {
+ .id = SCMI_EVENT_CLOCK_RATE_CHANGED,
+ .max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
+ .max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
+ },
+ {
+ .id = SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED,
+ .max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
+ .max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
+ },
+};
+
+static const struct scmi_event_ops clk_event_ops = {
+ .get_num_sources = scmi_clk_get_num_sources,
+ .set_notify_enabled = scmi_clk_set_notify_enabled,
+ .fill_custom_report = scmi_clk_fill_custom_report,
+};
+
+static const struct scmi_protocol_events clk_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &clk_event_ops,
+ .evts = clk_events,
+ .num_events = ARRAY_SIZE(clk_events),
+};
+
static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
{
u32 version;
int clkid, ret;
struct clock_info *cinfo;
- ph->xops->version_get(ph, &version);
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
dev_dbg(ph->dev, "Clock Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
@@ -370,7 +560,9 @@ static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
if (!cinfo)
return -ENOMEM;
- scmi_clock_protocol_attributes_get(ph, cinfo);
+ ret = scmi_clock_protocol_attributes_get(ph, cinfo);
+ if (ret)
+ return ret;
cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks,
sizeof(*cinfo->clk), GFP_KERNEL);
@@ -380,7 +572,7 @@ static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
struct scmi_clock_info *clk = cinfo->clk + clkid;
- ret = scmi_clock_attributes_get(ph, clkid, clk);
+ ret = scmi_clock_attributes_get(ph, clkid, clk, version);
if (!ret)
scmi_clock_describe_rates_get(ph, clkid, clk);
}
@@ -394,6 +586,7 @@ static const struct scmi_protocol scmi_clock = {
.owner = THIS_MODULE,
.instance_init = &scmi_clock_protocol_init,
.ops = &clk_proto_ops,
+ .events = &clk_protocol_events,
};
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)
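
With the rate-change events registered above, an SCMI driver user can
subscribe through the core notification ops and receive the
scmi_clock_rate_notif_report filled in by scmi_clk_fill_custom_report(). A
minimal sketch, assuming sdev is the scmi_device handed to a driver's probe
(the callback body is illustrative):

	static int clk_rate_changed_cb(struct notifier_block *nb,
				       unsigned long event, void *data)
	{
		const struct scmi_clock_rate_notif_report *r = data;

		pr_debug("clock %u changed to %llu Hz (agent %u)\n",
			 r->clock_id, r->rate, r->agent_id);
		return NOTIFY_OK;
	}

	static struct notifier_block clk_rate_nb = {
		.notifier_call = clk_rate_changed_cb,
	};

	static int example_register(struct scmi_device *sdev, u32 clk_id)
	{
		const struct scmi_handle *h = sdev->handle;

		/* src_id selects one clock; pass NULL to listen to all */
		return h->notify_ops->devm_event_notifier_register(sdev,
				SCMI_PROTOCOL_CLOCK,
				SCMI_EVENT_CLOCK_RATE_CHANGED,
				&clk_id, &clk_rate_nb);
	}
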
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 4fda84bfab42..61aba7447c32 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -4,7 +4,7 @@
* driver common header file containing some definitions, structures
* and function prototypes used in all the different SCMI protocols.
*
- * Copyright (C) 2018-2021 ARM Ltd.
+ * Copyright (C) 2018-2022 ARM Ltd.
*/
#ifndef _SCMI_COMMON_H
#define _SCMI_COMMON_H
@@ -24,38 +24,9 @@
#include <asm/unaligned.h>
+#include "protocols.h"
#include "notify.h"
-#define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0)
-#define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16)
-#define PROTOCOL_REV_MAJOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x)))
-#define PROTOCOL_REV_MINOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MINOR_MASK, (x)))
-#define MAX_PROTOCOLS_IMP 16
-#define MAX_OPPS 16
-
-enum scmi_common_cmd {
- PROTOCOL_VERSION = 0x0,
- PROTOCOL_ATTRIBUTES = 0x1,
- PROTOCOL_MESSAGE_ATTRIBUTES = 0x2,
-};
-
-/**
- * struct scmi_msg_resp_prot_version - Response for a message
- *
- * @minor_version: Minor version of the ABI that firmware supports
- * @major_version: Major version of the ABI that firmware supports
- *
- * In general, ABI version changes follow the rule that minor version increments
- * are backward compatible. Major revision changes in ABI may not be
- * backward compatible.
- *
- * Response to a generic message with message type SCMI_MSG_VERSION
- */
-struct scmi_msg_resp_prot_version {
- __le16 minor_version;
- __le16 major_version;
-};
-
#define MSG_ID_MASK GENMASK(7, 0)
#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
#define MSG_TYPE_MASK GENMASK(9, 8)
@@ -80,28 +51,6 @@ struct scmi_msg_resp_prot_version {
#define SCMI_PENDING_XFERS_HT_ORDER_SZ 9
/**
- * struct scmi_msg_hdr - Message(Tx/Rx) header
- *
- * @id: The identifier of the message being sent
- * @protocol_id: The identifier of the protocol used to send @id message
- * @type: The SCMI type for this message
- * @seq: The token to identify the message. When a message returns, the
- * platform returns the whole message header unmodified including the
- * token
- * @status: Status of the transfer once it's complete
- * @poll_completion: Indicate if the transfer needs to be polled for
- * completion or interrupt mode is used
- */
-struct scmi_msg_hdr {
- u8 id;
- u8 protocol_id;
- u8 type;
- u16 seq;
- u32 status;
- bool poll_completion;
-};
-
-/**
* pack_scmi_header() - packs and returns 32-bit header
*
* @hdr: pointer to header containing all the information on message id,
@@ -130,72 +79,6 @@ static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
hdr->type = MSG_XTRACT_TYPE(msg_hdr);
}
-/**
- * struct scmi_msg - Message(Tx/Rx) structure
- *
- * @buf: Buffer pointer
- * @len: Length of data in the Buffer
- */
-struct scmi_msg {
- void *buf;
- size_t len;
-};
-
-/**
- * struct scmi_xfer - Structure representing a message flow
- *
- * @transfer_id: Unique ID for debug & profiling purpose
- * @hdr: Transmit message header
- * @tx: Transmit message
- * @rx: Receive message, the buffer should be pre-allocated to store
- * message. If request-ACK protocol is used, we can reuse the same
- * buffer for the rx path as we use for the tx path.
- * @done: command message transmit completion event
- * @async_done: pointer to delayed response message received event completion
- * @pending: True for xfers added to @pending_xfers hashtable
- * @node: An hlist_node reference used to store this xfer, alternatively, on
- * the free list @free_xfers or in the @pending_xfers hashtable
- * @users: A refcount to track the active users for this xfer.
- * This is meant to protect against the possibility that, when a command
- * transaction times out concurrently with the reception of a valid
- * response message, the xfer could be finally put on the TX path, and
- * so vanish, while on the RX path scmi_rx_callback() is still
- * processing it: in such a case this refcounting will ensure that, even
- * though the timed-out transaction will anyway cause the command
- * request to be reported as failed by time-out, the underlying xfer
- * cannot be discarded and possibly reused until the last one user on
- * the RX path has released it.
- * @busy: An atomic flag to ensure exclusive write access to this xfer
- * @state: The current state of this transfer, with states transitions deemed
- * valid being:
- * - SCMI_XFER_SENT_OK -> SCMI_XFER_RESP_OK [ -> SCMI_XFER_DRESP_OK ]
- * - SCMI_XFER_SENT_OK -> SCMI_XFER_DRESP_OK
- * (Missing synchronous response is assumed OK and ignored)
- * @lock: A spinlock to protect state and busy fields.
- * @priv: A pointer for transport private usage.
- */
-struct scmi_xfer {
- int transfer_id;
- struct scmi_msg_hdr hdr;
- struct scmi_msg tx;
- struct scmi_msg rx;
- struct completion done;
- struct completion *async_done;
- bool pending;
- struct hlist_node node;
- refcount_t users;
-#define SCMI_XFER_FREE 0
-#define SCMI_XFER_BUSY 1
- atomic_t busy;
-#define SCMI_XFER_SENT_OK 0
-#define SCMI_XFER_RESP_OK 1
-#define SCMI_XFER_DRESP_OK 2
- int state;
- /* A lock to protect state and busy fields */
- spinlock_t lock;
- void *priv;
-};
-
/*
* A helper macro to look up an xfer from the @pending_xfers hashtable
* using the message sequence number token as a key.
@@ -211,64 +94,6 @@ struct scmi_xfer {
xfer_; \
})
-struct scmi_xfer_ops;
-
-/**
- * struct scmi_protocol_handle - Reference to an initialized protocol instance
- *
- * @dev: A reference to the associated SCMI instance device (handle->dev).
- * @xops: A reference to a struct holding refs to the core xfer operations that
- * can be used by the protocol implementation to generate SCMI messages.
- * @set_priv: A method to set protocol private data for this instance.
- * @get_priv: A method to get protocol private data previously set.
- *
- * This structure represents a protocol initialized against specific SCMI
- * instance and it will be used as follows:
- * - as a parameter fed from the core to the protocol initialization code so
- * that it can access the core xfer operations to build and generate SCMI
- * messages exclusively for the specific underlying protocol instance.
- * - as an opaque handle fed by an SCMI driver user when it tries to access
- * this protocol through its own protocol operations.
- * In this case this handle will be returned as an opaque object together
- * with the related protocol operations when the SCMI driver tries to access
- * the protocol.
- */
-struct scmi_protocol_handle {
- struct device *dev;
- const struct scmi_xfer_ops *xops;
- int (*set_priv)(const struct scmi_protocol_handle *ph, void *priv);
- void *(*get_priv)(const struct scmi_protocol_handle *ph);
-};
-
-/**
- * struct scmi_xfer_ops - References to the core SCMI xfer operations.
- * @version_get: Get this version protocol.
- * @xfer_get_init: Initialize one struct xfer if any xfer slot is free.
- * @reset_rx_to_maxsz: Reset rx size to max transport size.
- * @do_xfer: Do the SCMI transfer.
- * @do_xfer_with_response: Do the SCMI transfer waiting for a response.
- * @xfer_put: Free the xfer slot.
- *
- * Note that all this operations expect a protocol handle as first parameter;
- * they then internally use it to infer the underlying protocol number: this
- * way is not possible for a protocol implementation to forge messages for
- * another protocol.
- */
-struct scmi_xfer_ops {
- int (*version_get)(const struct scmi_protocol_handle *ph, u32 *version);
- int (*xfer_get_init)(const struct scmi_protocol_handle *ph, u8 msg_id,
- size_t tx_size, size_t rx_size,
- struct scmi_xfer **p);
- void (*reset_rx_to_maxsz)(const struct scmi_protocol_handle *ph,
- struct scmi_xfer *xfer);
- int (*do_xfer)(const struct scmi_protocol_handle *ph,
- struct scmi_xfer *xfer);
- int (*do_xfer_with_response)(const struct scmi_protocol_handle *ph,
- struct scmi_xfer *xfer);
- void (*xfer_put)(const struct scmi_protocol_handle *ph,
- struct scmi_xfer *xfer);
-};
-
struct scmi_revision_info *
scmi_revision_area_get(const struct scmi_protocol_handle *ph);
int scmi_handle_put(const struct scmi_handle *handle);
@@ -277,55 +102,9 @@ void scmi_set_handle(struct scmi_device *scmi_dev);
void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
u8 *prot_imp);
-typedef int (*scmi_prot_init_ph_fn_t)(const struct scmi_protocol_handle *);
-
-/**
- * struct scmi_protocol - Protocol descriptor
- * @id: Protocol ID.
- * @owner: Module reference if any.
- * @instance_init: Mandatory protocol initialization function.
- * @instance_deinit: Optional protocol de-initialization function.
- * @ops: Optional reference to the operations provided by the protocol and
- * exposed in scmi_protocol.h.
- * @events: An optional reference to the events supported by this protocol.
- */
-struct scmi_protocol {
- const u8 id;
- struct module *owner;
- const scmi_prot_init_ph_fn_t instance_init;
- const scmi_prot_init_ph_fn_t instance_deinit;
- const void *ops;
- const struct scmi_protocol_events *events;
-};
-
int __init scmi_bus_init(void);
void __exit scmi_bus_exit(void);
-#define DECLARE_SCMI_REGISTER_UNREGISTER(func) \
- int __init scmi_##func##_register(void); \
- void __exit scmi_##func##_unregister(void)
-DECLARE_SCMI_REGISTER_UNREGISTER(base);
-DECLARE_SCMI_REGISTER_UNREGISTER(clock);
-DECLARE_SCMI_REGISTER_UNREGISTER(perf);
-DECLARE_SCMI_REGISTER_UNREGISTER(power);
-DECLARE_SCMI_REGISTER_UNREGISTER(reset);
-DECLARE_SCMI_REGISTER_UNREGISTER(sensors);
-DECLARE_SCMI_REGISTER_UNREGISTER(voltage);
-DECLARE_SCMI_REGISTER_UNREGISTER(system);
-
-#define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(name, proto) \
-static const struct scmi_protocol *__this_proto = &(proto); \
- \
-int __init scmi_##name##_register(void) \
-{ \
- return scmi_protocol_register(__this_proto); \
-} \
- \
-void __exit scmi_##name##_unregister(void) \
-{ \
- scmi_protocol_unregister(__this_proto); \
-}
-
const struct scmi_protocol *scmi_protocol_get(int protocol_id);
void scmi_protocol_put(int protocol_id);
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index e17c6568344d..c1922bd650ae 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -128,7 +128,8 @@ struct scmi_protocol_instance {
* usage.
* @protocols_mtx: A mutex to protect protocols instances initialization.
* @protocols_imp: List of protocols implemented, currently maximum of
- * MAX_PROTOCOLS_IMP elements allocated by the base protocol
+ * scmi_revision_info.num_protocols elements allocated by the
+ * base protocol
* @active_protocols: IDR storing device_nodes for protocols actually defined
* in the DT and confirmed as implemented by fw.
* @atomic_threshold: Optional system wide DT-configured threshold, expressed
@@ -1102,6 +1103,167 @@ static const struct scmi_xfer_ops xfer_ops = {
.xfer_put = xfer_put,
};
+struct scmi_msg_resp_domain_name_get {
+ __le32 flags;
+ u8 name[SCMI_MAX_STR_SIZE];
+};
+
+/**
+ * scmi_common_extended_name_get - Common helper to get extended resources name
+ * @ph: A protocol handle reference.
+ * @cmd_id: The specific command ID to use.
+ * @res_id: The specific resource ID to use.
+ * @name: A pointer to the preallocated area where the retrieved name will be
+ * stored as a NULL terminated string.
+ * @len: The len in bytes of the @name char array.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph,
+ u8 cmd_id, u32 res_id, char *name,
+ size_t len)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_domain_name_get *resp;
+
+ ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(res_id),
+ sizeof(*resp), &t);
+ if (ret)
+ goto out;
+
+ put_unaligned_le32(res_id, t->tx.buf);
+ resp = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ strscpy(name, resp->name, len);
+
+ ph->xops->xfer_put(ph, t);
+out:
+ if (ret)
+ dev_warn(ph->dev,
+ "Failed to get extended name - id:%u (ret:%d). Using %s\n",
+ res_id, ret, name);
+ return ret;
+}
+
+/**
+ * struct scmi_iterator - Iterator descriptor
+ * @msg: A reference to the message TX buffer; filled by @prepare_message with
+ * a proper custom command payload for each multi-part command request.
+ * @resp: A reference to the response RX buffer; used by @update_state and
+ * @process_response to parse the multi-part replies.
+ * @t: A reference to the underlying xfer initialized and used transparently by
+ * the iterator internal routines.
+ * @ph: A reference to the associated protocol handle to be used.
+ * @ops: A reference to the custom provided iterator operations.
+ * @state: The current iterator state; used and updated in turn by the iterator's
+ * internal routines and by the caller-provided @scmi_iterator_ops.
+ * @priv: A reference to optional private data as provided by the caller and
+ * passed back to the @scmi_iterator_ops.
+ */
+struct scmi_iterator {
+ void *msg;
+ void *resp;
+ struct scmi_xfer *t;
+ const struct scmi_protocol_handle *ph;
+ struct scmi_iterator_ops *ops;
+ struct scmi_iterator_state state;
+ void *priv;
+};
+
+static void *scmi_iterator_init(const struct scmi_protocol_handle *ph,
+ struct scmi_iterator_ops *ops,
+ unsigned int max_resources, u8 msg_id,
+ size_t tx_size, void *priv)
+{
+ int ret;
+ struct scmi_iterator *i;
+
+ i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL);
+ if (!i)
+ return ERR_PTR(-ENOMEM);
+
+ i->ph = ph;
+ i->ops = ops;
+ i->priv = priv;
+
+ ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t);
+ if (ret) {
+ devm_kfree(ph->dev, i);
+ return ERR_PTR(ret);
+ }
+
+ i->state.max_resources = max_resources;
+ i->msg = i->t->tx.buf;
+ i->resp = i->t->rx.buf;
+
+ return i;
+}
+
+static int scmi_iterator_run(void *iter)
+{
+ int ret = -EINVAL;
+ struct scmi_iterator_ops *iops;
+ const struct scmi_protocol_handle *ph;
+ struct scmi_iterator_state *st;
+ struct scmi_iterator *i = iter;
+
+ if (!i || !i->ops || !i->ph)
+ return ret;
+
+ iops = i->ops;
+ ph = i->ph;
+ st = &i->state;
+
+ do {
+ iops->prepare_message(i->msg, st->desc_index, i->priv);
+ ret = ph->xops->do_xfer(ph, i->t);
+ if (ret)
+ break;
+
+ ret = iops->update_state(st, i->resp, i->priv);
+ if (ret)
+ break;
+
+ if (st->num_returned > st->max_resources - st->desc_index) {
+ dev_err(ph->dev,
+ "No. of resources can't exceed %d\n",
+ st->max_resources);
+ ret = -EINVAL;
+ break;
+ }
+
+ for (st->loop_idx = 0; st->loop_idx < st->num_returned;
+ st->loop_idx++) {
+ ret = iops->process_response(ph, i->resp, st, i->priv);
+ if (ret)
+ goto out;
+ }
+
+ st->desc_index += st->num_returned;
+ ph->xops->reset_rx_to_maxsz(ph, i->t);
+ /*
+ * check for both returned and remaining to avoid infinite
+ * loop due to buggy firmware
+ */
+ } while (st->num_returned && st->num_remaining);
+
+out:
+ /* Finalize and destroy iterator */
+ ph->xops->xfer_put(ph, i->t);
+ devm_kfree(ph->dev, i);
+
+ return ret;
+}
+
+static const struct scmi_proto_helpers_ops helpers_ops = {
+ .extended_name_get = scmi_common_extended_name_get,
+ .iter_response_init = scmi_iterator_init,
+ .iter_response_run = scmi_iterator_run,
+};
+
/**
* scmi_revision_area_get - Retrieve version memory area.
*
@@ -1162,6 +1324,7 @@ scmi_alloc_init_protocol_instance(struct scmi_info *info,
pi->handle = handle;
pi->ph.dev = handle->dev;
pi->ph.xops = &xfer_ops;
+ pi->ph.hops = &helpers_ops;
pi->ph.set_priv = scmi_set_protocol_priv;
pi->ph.get_priv = scmi_get_protocol_priv;
refcount_set(&pi->users, 1);
@@ -1310,11 +1473,12 @@ scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
int i;
struct scmi_info *info = handle_to_scmi_info(handle);
+ struct scmi_revision_info *rev = handle->version;
if (!info->protocols_imp)
return false;
- for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
+ for (i = 0; i < rev->num_protocols; i++)
if (info->protocols_imp[i] == prot_id)
return true;
return false;
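
The iterator pulls the recurring multi-part query loop (send, parse counts,
consume entries, repeat while num_returned and num_remaining) out of the
individual protocols; a user supplies three callbacks and lets the core drive
the exchange. A compressed sketch of the contract, mirroring the
clock-protocol conversion earlier in this diff (MY_CMD, MY_MAX_RESOURCES and
MY_TX_SIZE stand in for a protocol's own values):

	static void my_prepare_message(void *message, unsigned int desc_index,
				       const void *priv)
	{
		/* fill the TX payload; desc_index counts entries already read */
	}

	static int my_update_state(struct scmi_iterator_state *st,
				   const void *response, void *priv)
	{
		/* parse st->num_returned / st->num_remaining from the reply */
		return 0;
	}

	static int my_process_response(const struct scmi_protocol_handle *ph,
				       const void *response,
				       struct scmi_iterator_state *st, void *priv)
	{
		/* consume entry st->loop_idx of the current reply */
		return 0;
	}

	static int my_multi_part_query(const struct scmi_protocol_handle *ph)
	{
		struct scmi_iterator_ops ops = {
			.prepare_message = my_prepare_message,
			.update_state = my_update_state,
			.process_response = my_process_response,
		};
		void *iter;

		iter = ph->hops->iter_response_init(ph, &ops, MY_MAX_RESOURCES,
						    MY_CMD, MY_TX_SIZE, NULL);
		if (IS_ERR(iter))
			return PTR_ERR(iter);

		/* drives the whole exchange, then releases the iterator */
		return ph->hops->iter_response_run(iter);
	}
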
diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c
index 8302a2b4aeeb..b503c22cfd32 100644
--- a/drivers/firmware/arm_scmi/optee.c
+++ b/drivers/firmware/arm_scmi/optee.c
@@ -64,6 +64,22 @@ enum scmi_optee_pta_cmd {
* [in] value[0].b: Requested capabilities mask (enum pta_scmi_caps)
*/
PTA_SCMI_CMD_GET_CHANNEL = 3,
+
+ /*
+ * PTA_SCMI_CMD_PROCESS_MSG_CHANNEL - Process SCMI message in a MSG
+ * buffer pointed by memref parameters
+ *
+ * [in] value[0].a: Channel handle
+ * [in] memref[1]: Message buffer (MSG and SCMI payload)
+ * [out] memref[2]: Response buffer (MSG and SCMI payload)
+ *
+ * Shared memories used for SCMI message/response are MSG buffers
+ * referenced by param[1] and param[2]. MSG transport protocol
+ * uses a 32bit header to carry SCMI meta-data (protocol ID and
+ * protocol message ID) followed by the effective SCMI message
+ * payload.
+ */
+ PTA_SCMI_CMD_PROCESS_MSG_CHANNEL = 4,
};
/*
@@ -72,9 +88,17 @@ enum scmi_optee_pta_cmd {
* PTA_SCMI_CAPS_SMT_HEADER
* When set, OP-TEE supports command using SMT header protocol (SCMI shmem) in
* shared memory buffers to carry SCMI protocol synchronisation information.
+ *
+ * PTA_SCMI_CAPS_MSG_HEADER
+ * When set, OP-TEE supports command using MSG header protocol in an OP-TEE
+ * shared memory to carry SCMI protocol synchronisation information and SCMI
+ * message payload.
*/
#define PTA_SCMI_CAPS_NONE 0
#define PTA_SCMI_CAPS_SMT_HEADER BIT(0)
+#define PTA_SCMI_CAPS_MSG_HEADER BIT(1)
+#define PTA_SCMI_CAPS_MASK (PTA_SCMI_CAPS_SMT_HEADER | \
+ PTA_SCMI_CAPS_MSG_HEADER)
/**
* struct scmi_optee_channel - Description of an OP-TEE SCMI channel
@@ -85,7 +109,8 @@ enum scmi_optee_pta_cmd {
* @mu: Mutex protection on channel access
* @cinfo: SCMI channel information
* @shmem: Virtual base address of the shared memory
- * @tee_shm: Reference to TEE shared memory or NULL if using static shmem
+ * @req: Shared memory protocol handle for SCMI request and synchronous response
+ * @tee_shm: TEE shared memory handle @req or NULL if using IOMEM shmem
* @link: Reference in agent's channel list
*/
struct scmi_optee_channel {
@@ -94,7 +119,10 @@ struct scmi_optee_channel {
u32 caps;
struct mutex mu;
struct scmi_chan_info *cinfo;
- struct scmi_shared_mem __iomem *shmem;
+ union {
+ struct scmi_shared_mem __iomem *shmem;
+ struct scmi_msg_payld *msg;
+ } req;
struct tee_shm *tee_shm;
struct list_head link;
};
@@ -178,8 +206,8 @@ static int get_capabilities(struct scmi_optee_agent *agent)
caps = param[0].u.value.a;
- if (!(caps & PTA_SCMI_CAPS_SMT_HEADER)) {
- dev_err(agent->dev, "OP-TEE SCMI PTA doesn't support SMT\n");
+ if (!(caps & (PTA_SCMI_CAPS_SMT_HEADER | PTA_SCMI_CAPS_MSG_HEADER))) {
+ dev_err(agent->dev, "OP-TEE SCMI PTA doesn't support SMT or MSG\n");
return -EOPNOTSUPP;
}
@@ -193,9 +221,14 @@ static int get_channel(struct scmi_optee_channel *channel)
struct device *dev = scmi_optee_private->dev;
struct tee_ioctl_invoke_arg arg = { };
struct tee_param param[1] = { };
- unsigned int caps = PTA_SCMI_CAPS_SMT_HEADER;
+ unsigned int caps = 0;
int ret;
+ if (channel->tee_shm)
+ caps = PTA_SCMI_CAPS_MSG_HEADER;
+ else
+ caps = PTA_SCMI_CAPS_SMT_HEADER;
+
arg.func = PTA_SCMI_CMD_GET_CHANNEL;
arg.session = channel->tee_session;
arg.num_params = 1;
@@ -220,25 +253,48 @@ static int get_channel(struct scmi_optee_channel *channel)
static int invoke_process_smt_channel(struct scmi_optee_channel *channel)
{
- struct tee_ioctl_invoke_arg arg = { };
- struct tee_param param[2] = { };
+ struct tee_ioctl_invoke_arg arg = {
+ .func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL,
+ .session = channel->tee_session,
+ .num_params = 1,
+ };
+ struct tee_param param[1] = { };
int ret;
- arg.session = channel->tee_session;
param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
param[0].u.value.a = channel->channel_id;
- if (channel->tee_shm) {
- param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
- param[1].u.memref.shm = channel->tee_shm;
- param[1].u.memref.size = SCMI_OPTEE_MAX_MSG_SIZE;
- arg.num_params = 2;
- arg.func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE;
- } else {
- arg.num_params = 1;
- arg.func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL;
+ ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param);
+ if (ret < 0 || arg.ret) {
+ dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n",
+ channel->channel_id, ret, arg.ret);
+ return -EIO;
}
+ return 0;
+}
+
+static int invoke_process_msg_channel(struct scmi_optee_channel *channel, size_t msg_size)
+{
+ struct tee_ioctl_invoke_arg arg = {
+ .func = PTA_SCMI_CMD_PROCESS_MSG_CHANNEL,
+ .session = channel->tee_session,
+ .num_params = 3,
+ };
+ struct tee_param param[3] = { };
+ int ret;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ param[0].u.value.a = channel->channel_id;
+
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+ param[1].u.memref.shm = channel->tee_shm;
+ param[1].u.memref.size = msg_size;
+
+ param[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+ param[2].u.memref.shm = channel->tee_shm;
+ param[2].u.memref.size = SCMI_OPTEE_MAX_MSG_SIZE;
+
ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param);
if (ret < 0 || arg.ret) {
dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n",
@@ -279,7 +335,26 @@ static void scmi_optee_clear_channel(struct scmi_chan_info *cinfo)
{
struct scmi_optee_channel *channel = cinfo->transport_info;
- shmem_clear_channel(channel->shmem);
+ if (!channel->tee_shm)
+ shmem_clear_channel(channel->req.shmem);
+}
+
+static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *channel)
+{
+ const size_t msg_size = SCMI_OPTEE_MAX_MSG_SIZE;
+ void *shbuf;
+
+ channel->tee_shm = tee_shm_alloc_kernel_buf(scmi_optee_private->tee_ctx, msg_size);
+ if (IS_ERR(channel->tee_shm)) {
+ dev_err(channel->cinfo->dev, "shmem allocation failed\n");
+ return -ENOMEM;
+ }
+
+	shbuf = tee_shm_get_va(channel->tee_shm, 0);
+	if (IS_ERR(shbuf))
+		return PTR_ERR(shbuf);
+
+	memset(shbuf, 0, msg_size);
+	channel->req.msg = shbuf;
+
+ return 0;
}
static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo,
@@ -304,8 +379,8 @@ static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo,
size = resource_size(&res);
- channel->shmem = devm_ioremap(dev, res.start, size);
- if (!channel->shmem) {
+ channel->req.shmem = devm_ioremap(dev, res.start, size);
+ if (!channel->req.shmem) {
dev_err(dev, "Failed to ioremap SCMI Tx shared memory\n");
ret = -EADDRNOTAVAIL;
goto out;
@@ -325,7 +400,7 @@ static int setup_shmem(struct device *dev, struct scmi_chan_info *cinfo,
if (of_find_property(cinfo->dev->of_node, "shmem", NULL))
return setup_static_shmem(dev, cinfo, channel);
else
- return -ENOMEM;
+ return setup_dynamic_shmem(dev, channel);
}
static int scmi_optee_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx)
@@ -405,27 +480,22 @@ static int scmi_optee_chan_free(int id, void *p, void *data)
return 0;
}
-static struct scmi_shared_mem __iomem *
-get_channel_shm(struct scmi_optee_channel *chan, struct scmi_xfer *xfer)
-{
- if (!chan)
- return NULL;
-
- return chan->shmem;
-}
-
-
static int scmi_optee_send_message(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
struct scmi_optee_channel *channel = cinfo->transport_info;
- struct scmi_shared_mem __iomem *shmem = get_channel_shm(channel, xfer);
int ret;
mutex_lock(&channel->mu);
- shmem_tx_prepare(shmem, xfer);
- ret = invoke_process_smt_channel(channel);
+ if (channel->tee_shm) {
+ msg_tx_prepare(channel->req.msg, xfer);
+ ret = invoke_process_msg_channel(channel, msg_command_size(xfer));
+ } else {
+ shmem_tx_prepare(channel->req.shmem, xfer);
+ ret = invoke_process_smt_channel(channel);
+ }
+
if (ret)
mutex_unlock(&channel->mu);
@@ -436,9 +506,11 @@ static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
struct scmi_optee_channel *channel = cinfo->transport_info;
- struct scmi_shared_mem __iomem *shmem = get_channel_shm(channel, xfer);
- shmem_fetch_response(shmem, xfer);
+ if (channel->tee_shm)
+ msg_fetch_response(channel->req.msg, SCMI_OPTEE_MAX_MSG_SIZE, xfer);
+ else
+ shmem_fetch_response(channel->req.shmem, xfer);
}
static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret,
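
For reference, the MSG framing exchanged through TEE shared memory by the new PTA_SCMI_CAPS_MSG_HEADER path is just a 32-bit header followed by the raw SCMI payload, as handled by msg_tx_prepare()/msg_fetch_response() above. A minimal sketch, assuming the layout used by the common msg helpers (the authoritative definition lives in drivers/firmware/arm_scmi/msg.c):

	struct scmi_msg_payld {
		__le32 msg_header;	/* packed protocol ID and protocol message ID */
		u8 msg_payload[];	/* effective SCMI message payload */
	};
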
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index f4cd5193b961..8f4051aca220 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -2,7 +2,7 @@
/*
* System Control and Management Interface (SCMI) Performance Protocol
*
- * Copyright (C) 2018-2021 ARM Ltd.
+ * Copyright (C) 2018-2022 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications PERF - " fmt
@@ -17,9 +17,11 @@
#include <linux/scmi_protocol.h>
#include <linux/sort.h>
-#include "common.h"
+#include "protocols.h"
#include "notify.h"
+#define MAX_OPPS 16
+
enum scmi_performance_protocol_cmd {
PERF_DOMAIN_ATTRIBUTES = 0x3,
PERF_DESCRIBE_LEVELS = 0x4,
@@ -30,6 +32,7 @@ enum scmi_performance_protocol_cmd {
PERF_NOTIFY_LIMITS = 0x9,
PERF_NOTIFY_LEVEL = 0xa,
PERF_DESCRIBE_FASTCHANNEL = 0xb,
+ PERF_DOMAIN_NAME_GET = 0xc,
};
struct scmi_opp {
@@ -42,6 +45,7 @@ struct scmi_msg_resp_perf_attributes {
__le16 num_domains;
__le16 flags;
#define POWER_SCALE_IN_MILLIWATT(x) ((x) & BIT(0))
+#define POWER_SCALE_IN_MICROWATT(x) ((x) & BIT(1))
__le32 stats_addr_low;
__le32 stats_addr_high;
__le32 stats_size;
@@ -54,10 +58,11 @@ struct scmi_msg_resp_perf_domain_attributes {
#define SUPPORTS_PERF_LIMIT_NOTIFY(x) ((x) & BIT(29))
#define SUPPORTS_PERF_LEVEL_NOTIFY(x) ((x) & BIT(28))
#define SUPPORTS_PERF_FASTCHANNELS(x) ((x) & BIT(27))
+#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(26))
__le32 rate_limit_us;
__le32 sustained_freq_khz;
__le32 sustained_perf_level;
- u8 name[SCMI_MAX_STR_SIZE];
+ u8 name[SCMI_SHORT_NAME_MAX_SIZE];
};
struct scmi_msg_perf_describe_levels {
@@ -166,6 +171,7 @@ struct scmi_perf_info {
u32 version;
int num_domains;
bool power_scale_mw;
+ bool power_scale_uw;
u64 stats_addr;
u32 stats_size;
struct perf_dom_info *dom_info;
@@ -196,6 +202,8 @@ static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph,
pi->num_domains = le16_to_cpu(attr->num_domains);
pi->power_scale_mw = POWER_SCALE_IN_MILLIWATT(flags);
+ if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3)
+ pi->power_scale_uw = POWER_SCALE_IN_MICROWATT(flags);
pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
(u64)le32_to_cpu(attr->stats_addr_high) << 32;
pi->stats_size = le32_to_cpu(attr->stats_size);
@@ -207,9 +215,11 @@ static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph,
static int
scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
- u32 domain, struct perf_dom_info *dom_info)
+ u32 domain, struct perf_dom_info *dom_info,
+ u32 version)
{
int ret;
+ u32 flags;
struct scmi_xfer *t;
struct scmi_msg_resp_perf_domain_attributes *attr;
@@ -223,7 +233,7 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
- u32 flags = le32_to_cpu(attr->flags);
+ flags = le32_to_cpu(attr->flags);
dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
@@ -246,6 +256,16 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
}
ph->xops->xfer_put(ph, t);
+
+ /*
+	 * If supported, overwrite the short name with the extended one;
+	 * on error just carry on and use the already provided short name.
+ */
+ if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
+ SUPPORTS_EXTENDED_NAMES(flags))
+ ph->hops->extended_name_get(ph, PERF_DOMAIN_NAME_GET, domain,
+ dom_info->name, SCMI_MAX_STR_SIZE);
+
return ret;
}
@@ -256,66 +276,87 @@ static int opp_cmp_func(const void *opp1, const void *opp2)
return t1->perf - t2->perf;
}
-static int
-scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain,
- struct perf_dom_info *perf_dom)
+struct scmi_perf_ipriv {
+ u32 domain;
+ struct perf_dom_info *perf_dom;
+};
+
+static void iter_perf_levels_prepare_message(void *message,
+ unsigned int desc_index,
+ const void *priv)
{
- int ret, cnt;
- u32 tot_opp_cnt = 0;
- u16 num_returned, num_remaining;
- struct scmi_xfer *t;
- struct scmi_opp *opp;
- struct scmi_msg_perf_describe_levels *dom_info;
- struct scmi_msg_resp_perf_describe_levels *level_info;
+ struct scmi_msg_perf_describe_levels *msg = message;
+ const struct scmi_perf_ipriv *p = priv;
- ret = ph->xops->xfer_get_init(ph, PERF_DESCRIBE_LEVELS,
- sizeof(*dom_info), 0, &t);
- if (ret)
- return ret;
+ msg->domain = cpu_to_le32(p->domain);
+ /* Set the number of OPPs to be skipped/already read */
+ msg->level_index = cpu_to_le32(desc_index);
+}
- dom_info = t->tx.buf;
- level_info = t->rx.buf;
+static int iter_perf_levels_update_state(struct scmi_iterator_state *st,
+ const void *response, void *priv)
+{
+ const struct scmi_msg_resp_perf_describe_levels *r = response;
- do {
- dom_info->domain = cpu_to_le32(domain);
- /* Set the number of OPPs to be skipped/already read */
- dom_info->level_index = cpu_to_le32(tot_opp_cnt);
+ st->num_returned = le16_to_cpu(r->num_returned);
+ st->num_remaining = le16_to_cpu(r->num_remaining);
- ret = ph->xops->do_xfer(ph, t);
- if (ret)
- break;
+ return 0;
+}
- num_returned = le16_to_cpu(level_info->num_returned);
- num_remaining = le16_to_cpu(level_info->num_remaining);
- if (tot_opp_cnt + num_returned > MAX_OPPS) {
- dev_err(ph->dev, "No. of OPPs exceeded MAX_OPPS");
- break;
- }
+static int
+iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
+ const void *response,
+ struct scmi_iterator_state *st, void *priv)
+{
+ struct scmi_opp *opp;
+ const struct scmi_msg_resp_perf_describe_levels *r = response;
+ struct scmi_perf_ipriv *p = priv;
- opp = &perf_dom->opp[tot_opp_cnt];
- for (cnt = 0; cnt < num_returned; cnt++, opp++) {
- opp->perf = le32_to_cpu(level_info->opp[cnt].perf_val);
- opp->power = le32_to_cpu(level_info->opp[cnt].power);
- opp->trans_latency_us = le16_to_cpu
- (level_info->opp[cnt].transition_latency_us);
+ opp = &p->perf_dom->opp[st->desc_index + st->loop_idx];
+ opp->perf = le32_to_cpu(r->opp[st->loop_idx].perf_val);
+ opp->power = le32_to_cpu(r->opp[st->loop_idx].power);
+ opp->trans_latency_us =
+ le16_to_cpu(r->opp[st->loop_idx].transition_latency_us);
+ p->perf_dom->opp_count++;
- dev_dbg(ph->dev, "Level %d Power %d Latency %dus\n",
- opp->perf, opp->power, opp->trans_latency_us);
- }
+ dev_dbg(ph->dev, "Level %d Power %d Latency %dus\n",
+ opp->perf, opp->power, opp->trans_latency_us);
- tot_opp_cnt += num_returned;
+ return 0;
+}
- ph->xops->reset_rx_to_maxsz(ph, t);
- /*
- * check for both returned and remaining to avoid infinite
- * loop due to buggy firmware
- */
- } while (num_returned && num_remaining);
+static int
+scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain,
+ struct perf_dom_info *perf_dom)
+{
+ int ret;
+ void *iter;
+ struct scmi_msg_perf_describe_levels *msg;
+ struct scmi_iterator_ops ops = {
+ .prepare_message = iter_perf_levels_prepare_message,
+ .update_state = iter_perf_levels_update_state,
+ .process_response = iter_perf_levels_process_response,
+ };
+ struct scmi_perf_ipriv ppriv = {
+ .domain = domain,
+ .perf_dom = perf_dom,
+ };
+
+ iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS,
+ PERF_DESCRIBE_LEVELS,
+ sizeof(*msg), &ppriv);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ ret = ph->hops->iter_response_run(iter);
+ if (ret)
+ return ret;
- perf_dom->opp_count = tot_opp_cnt;
- ph->xops->xfer_put(ph, t);
+ if (perf_dom->opp_count)
+ sort(perf_dom->opp, perf_dom->opp_count,
+ sizeof(struct scmi_opp), opp_cmp_func, NULL);
- sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL);
return ret;
}
@@ -382,6 +423,9 @@ static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom = pi->dom_info + domain;
+ if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf)
+ return -EINVAL;
+
if (dom->fc_info && dom->fc_info->limit_set_addr) {
iowrite32(max_perf, dom->fc_info->limit_set_addr);
iowrite32(min_perf, dom->fc_info->limit_set_addr + 4);
@@ -873,11 +917,13 @@ static const struct scmi_protocol_events perf_protocol_events = {
static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
{
- int domain;
+ int domain, ret;
u32 version;
struct scmi_perf_info *pinfo;
- ph->xops->version_get(ph, &version);
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
dev_dbg(ph->dev, "Performance Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
@@ -886,7 +932,9 @@ static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
if (!pinfo)
return -ENOMEM;
- scmi_perf_attributes_get(ph, pinfo);
+ ret = scmi_perf_attributes_get(ph, pinfo);
+ if (ret)
+ return ret;
pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
sizeof(*pinfo->dom_info), GFP_KERNEL);
@@ -896,7 +944,7 @@ static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
for (domain = 0; domain < pinfo->num_domains; domain++) {
struct perf_dom_info *dom = pinfo->dom_info + domain;
- scmi_perf_domain_attributes_get(ph, domain, dom);
+ scmi_perf_domain_attributes_get(ph, domain, dom, version);
scmi_perf_describe_levels_get(ph, domain, dom);
if (dom->perf_fastchannels)
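
The describe-levels rework above is the first user of the common iterator helpers declared in protocols.h further below: a protocol supplies three callbacks and lets the core drive the multi-part request loop. A sketch of the calling pattern, using hypothetical foo_* names rather than any real protocol:

	static int foo_describe_resources(const struct scmi_protocol_handle *ph,
					  struct foo_info *fi)
	{
		void *iter;
		struct scmi_iterator_ops ops = {
			.prepare_message = foo_prepare_message,	/* fill one command */
			.update_state = foo_update_state,	/* returned/remaining */
			.process_response = foo_process_response, /* one item per call */
		};

		iter = ph->hops->iter_response_init(ph, &ops, fi->max_resources,
						    FOO_DESCRIBE_CMD,
						    sizeof(struct foo_msg), fi);
		if (IS_ERR(iter))
			return PTR_ERR(iter);

		return ph->hops->iter_response_run(iter);
	}
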
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
index ad2ab080f344..964882cc8747 100644
--- a/drivers/firmware/arm_scmi/power.c
+++ b/drivers/firmware/arm_scmi/power.c
@@ -2,7 +2,7 @@
/*
* System Control and Management Interface (SCMI) Power Protocol
*
- * Copyright (C) 2018-2021 ARM Ltd.
+ * Copyright (C) 2018-2022 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications POWER - " fmt
@@ -10,7 +10,7 @@
#include <linux/module.h>
#include <linux/scmi_protocol.h>
-#include "common.h"
+#include "protocols.h"
#include "notify.h"
enum scmi_power_protocol_cmd {
@@ -18,6 +18,7 @@ enum scmi_power_protocol_cmd {
POWER_STATE_SET = 0x4,
POWER_STATE_GET = 0x5,
POWER_STATE_NOTIFY = 0x6,
+ POWER_DOMAIN_NAME_GET = 0x8,
};
struct scmi_msg_resp_power_attributes {
@@ -33,7 +34,8 @@ struct scmi_msg_resp_power_domain_attributes {
#define SUPPORTS_STATE_SET_NOTIFY(x) ((x) & BIT(31))
#define SUPPORTS_STATE_SET_ASYNC(x) ((x) & BIT(30))
#define SUPPORTS_STATE_SET_SYNC(x) ((x) & BIT(29))
- u8 name[SCMI_MAX_STR_SIZE];
+#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(27))
+ u8 name[SCMI_SHORT_NAME_MAX_SIZE];
};
struct scmi_power_set_state {
@@ -97,9 +99,11 @@ static int scmi_power_attributes_get(const struct scmi_protocol_handle *ph,
static int
scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph,
- u32 domain, struct power_dom_info *dom_info)
+ u32 domain, struct power_dom_info *dom_info,
+ u32 version)
{
int ret;
+ u32 flags;
struct scmi_xfer *t;
struct scmi_msg_resp_power_domain_attributes *attr;
@@ -113,15 +117,26 @@ scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph,
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
- u32 flags = le32_to_cpu(attr->flags);
+ flags = le32_to_cpu(attr->flags);
dom_info->state_set_notify = SUPPORTS_STATE_SET_NOTIFY(flags);
dom_info->state_set_async = SUPPORTS_STATE_SET_ASYNC(flags);
dom_info->state_set_sync = SUPPORTS_STATE_SET_SYNC(flags);
strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
}
-
ph->xops->xfer_put(ph, t);
+
+ /*
+	 * If supported, overwrite the short name with the extended one;
+	 * on error just carry on and use the already provided short name.
+ */
+ if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
+ SUPPORTS_EXTENDED_NAMES(flags)) {
+ ph->hops->extended_name_get(ph, POWER_DOMAIN_NAME_GET,
+ domain, dom_info->name,
+ SCMI_MAX_STR_SIZE);
+ }
+
return ret;
}
@@ -174,8 +189,9 @@ static int scmi_power_num_domains_get(const struct scmi_protocol_handle *ph)
return pi->num_domains;
}
-static char *scmi_power_name_get(const struct scmi_protocol_handle *ph,
- u32 domain)
+static const char *
+scmi_power_name_get(const struct scmi_protocol_handle *ph,
+ u32 domain)
{
struct scmi_power_info *pi = ph->get_priv(ph);
struct power_dom_info *dom = pi->dom_info + domain;
@@ -280,11 +296,13 @@ static const struct scmi_protocol_events power_protocol_events = {
static int scmi_power_protocol_init(const struct scmi_protocol_handle *ph)
{
- int domain;
+ int domain, ret;
u32 version;
struct scmi_power_info *pinfo;
- ph->xops->version_get(ph, &version);
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
dev_dbg(ph->dev, "Power Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
@@ -293,7 +311,9 @@ static int scmi_power_protocol_init(const struct scmi_protocol_handle *ph)
if (!pinfo)
return -ENOMEM;
- scmi_power_attributes_get(ph, pinfo);
+ ret = scmi_power_attributes_get(ph, pinfo);
+ if (ret)
+ return ret;
pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
sizeof(*pinfo->dom_info), GFP_KERNEL);
@@ -303,7 +323,7 @@ static int scmi_power_protocol_init(const struct scmi_protocol_handle *ph)
for (domain = 0; domain < pinfo->num_domains; domain++) {
struct power_dom_info *dom = pinfo->dom_info + domain;
- scmi_power_domain_attributes_get(ph, domain, dom);
+ scmi_power_domain_attributes_get(ph, domain, dom, version);
}
pinfo->version = version;
diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h
new file mode 100644
index 000000000000..73304af5ec4a
--- /dev/null
+++ b/drivers/firmware/arm_scmi/protocols.h
@@ -0,0 +1,318 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * System Control and Management Interface (SCMI) Message Protocol
+ * protocols common header file containing some definitions, structures
+ * and function prototypes used in all the different SCMI protocols.
+ *
+ * Copyright (C) 2022 ARM Ltd.
+ */
+#ifndef _SCMI_PROTOCOLS_H
+#define _SCMI_PROTOCOLS_H
+
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/hashtable.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/refcount.h>
+#include <linux/scmi_protocol.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <asm/unaligned.h>
+
+#define SCMI_SHORT_NAME_MAX_SIZE 16
+
+#define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0)
+#define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16)
+#define PROTOCOL_REV_MAJOR(x) ((u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x))))
+#define PROTOCOL_REV_MINOR(x) ((u16)(FIELD_GET(PROTOCOL_REV_MINOR_MASK, (x))))
+
+enum scmi_common_cmd {
+ PROTOCOL_VERSION = 0x0,
+ PROTOCOL_ATTRIBUTES = 0x1,
+ PROTOCOL_MESSAGE_ATTRIBUTES = 0x2,
+};
+
+/**
+ * struct scmi_msg_resp_prot_version - Response for a message
+ *
+ * @minor_version: Minor version of the ABI that firmware supports
+ * @major_version: Major version of the ABI that firmware supports
+ *
+ * In general, ABI version changes follow the rule that minor version increments
+ * are backward compatible. Major revision changes in ABI may not be
+ * backward compatible.
+ *
+ * Response to a generic message with message type SCMI_MSG_VERSION
+ */
+struct scmi_msg_resp_prot_version {
+ __le16 minor_version;
+ __le16 major_version;
+};
+
+/**
+ * struct scmi_msg - Message (Tx/Rx) structure
+ *
+ * @buf: Buffer pointer
+ * @len: Length of data in the buffer
+ */
+struct scmi_msg {
+ void *buf;
+ size_t len;
+};
+
+/**
+ * struct scmi_msg_hdr - Message (Tx/Rx) header
+ *
+ * @id: The identifier of the message being sent
+ * @protocol_id: The identifier of the protocol used to send @id message
+ * @type: The SCMI type for this message
+ * @seq: The token to identify the message. When a message returns, the
+ * platform returns the whole message header unmodified including the
+ * token
+ * @status: Status of the transfer once it's complete
+ * @poll_completion: Indicate if the transfer needs to be polled for
+ * completion or interrupt mode is used
+ */
+struct scmi_msg_hdr {
+ u8 id;
+ u8 protocol_id;
+ u8 type;
+ u16 seq;
+ u32 status;
+ bool poll_completion;
+};
+
+/**
+ * struct scmi_xfer - Structure representing a message flow
+ *
+ * @transfer_id: Unique ID for debug & profiling purpose
+ * @hdr: Transmit message header
+ * @tx: Transmit message
+ * @rx: Receive message, the buffer should be pre-allocated to store
+ * message. If request-ACK protocol is used, we can reuse the same
+ * buffer for the rx path as we use for the tx path.
+ * @done: command message transmit completion event
+ * @async_done: pointer to delayed response message received event completion
+ * @pending: True for xfers added to @pending_xfers hashtable
+ * @node: An hlist_node reference used to store this xfer either on the free
+ *	  list @free_xfers or in the @pending_xfers hashtable
+ * @users: A refcount to track the active users for this xfer.
+ * This is meant to protect against the possibility that, when a command
+ * transaction times out concurrently with the reception of a valid
+ * response message, the xfer could be finally put on the TX path, and
+ * so vanish, while on the RX path scmi_rx_callback() is still
+ * processing it: in such a case this refcounting will ensure that, even
+ * though the timed-out transaction will anyway cause the command
+ * request to be reported as failed by time-out, the underlying xfer
+ *	  cannot be discarded and possibly reused until the last user on
+ * the RX path has released it.
+ * @busy: An atomic flag to ensure exclusive write access to this xfer
+ * @state: The current state of this transfer, with states transitions deemed
+ * valid being:
+ * - SCMI_XFER_SENT_OK -> SCMI_XFER_RESP_OK [ -> SCMI_XFER_DRESP_OK ]
+ * - SCMI_XFER_SENT_OK -> SCMI_XFER_DRESP_OK
+ * (Missing synchronous response is assumed OK and ignored)
+ * @lock: A spinlock to protect state and busy fields.
+ * @priv: A pointer for transport private usage.
+ */
+struct scmi_xfer {
+ int transfer_id;
+ struct scmi_msg_hdr hdr;
+ struct scmi_msg tx;
+ struct scmi_msg rx;
+ struct completion done;
+ struct completion *async_done;
+ bool pending;
+ struct hlist_node node;
+ refcount_t users;
+#define SCMI_XFER_FREE 0
+#define SCMI_XFER_BUSY 1
+ atomic_t busy;
+#define SCMI_XFER_SENT_OK 0
+#define SCMI_XFER_RESP_OK 1
+#define SCMI_XFER_DRESP_OK 2
+ int state;
+ /* A lock to protect state and busy fields */
+ spinlock_t lock;
+ void *priv;
+};
+
+struct scmi_xfer_ops;
+struct scmi_proto_helpers_ops;
+
+/**
+ * struct scmi_protocol_handle - Reference to an initialized protocol instance
+ *
+ * @dev: A reference to the associated SCMI instance device (handle->dev).
+ * @xops: A reference to a struct holding refs to the core xfer operations that
+ *	  can be used by the protocol implementation to generate SCMI messages.
+ * @hops: A reference to a struct holding refs to the common protocol helper
+ *	  operations provided by the core.
+ * @set_priv: A method to set protocol private data for this instance.
+ * @get_priv: A method to get protocol private data previously set.
+ *
+ * This structure represents a protocol initialized against a specific SCMI
+ * instance and it will be used as follows:
+ * - as a parameter fed from the core to the protocol initialization code so
+ * that it can access the core xfer operations to build and generate SCMI
+ * messages exclusively for the specific underlying protocol instance.
+ * - as an opaque handle fed by an SCMI driver user when it tries to access
+ * this protocol through its own protocol operations.
+ * In this case this handle will be returned as an opaque object together
+ * with the related protocol operations when the SCMI driver tries to access
+ * the protocol.
+ */
+struct scmi_protocol_handle {
+ struct device *dev;
+ const struct scmi_xfer_ops *xops;
+ const struct scmi_proto_helpers_ops *hops;
+ int (*set_priv)(const struct scmi_protocol_handle *ph, void *priv);
+ void *(*get_priv)(const struct scmi_protocol_handle *ph);
+};
+
+/**
+ * struct scmi_iterator_state - Iterator current state descriptor
+ * @desc_index: Starting index for the current multi-part request.
+ * @num_returned: Number of returned items in the last multi-part reply.
+ * @num_remaining: Number of remaining items in the multi-part message.
+ * @max_resources: Maximum acceptable number of items, configured by the caller
+ * depending on the underlying resources that it is querying.
+ * @loop_idx: The iterator loop index in the current multi-part reply.
+ * @priv: Optional pointer to additional state-related private data set up
+ *	  by the caller during the iterations.
+ */
+struct scmi_iterator_state {
+ unsigned int desc_index;
+ unsigned int num_returned;
+ unsigned int num_remaining;
+ unsigned int max_resources;
+ unsigned int loop_idx;
+ void *priv;
+};
+
+/**
+ * struct scmi_iterator_ops - Custom iterator operations
+ * @prepare_message: An operation to provide the custom logic to fill in the
+ *		     SCMI command request pointed to by @message. @desc_index is
+ * a reference to the next index to use in the multi-part
+ * request.
+ * @update_state: An operation to provide the custom logic to update the
+ * iterator state from the actual message response.
+ * @process_response: An operation to provide the custom logic needed to process
+ * each chunk of the multi-part message.
+ */
+struct scmi_iterator_ops {
+ void (*prepare_message)(void *message, unsigned int desc_index,
+ const void *priv);
+ int (*update_state)(struct scmi_iterator_state *st,
+ const void *response, void *priv);
+ int (*process_response)(const struct scmi_protocol_handle *ph,
+ const void *response,
+ struct scmi_iterator_state *st, void *priv);
+};
+
+/**
+ * struct scmi_proto_helpers_ops - References to common protocol helpers
+ * @extended_name_get: A common helper function to retrieve extended naming
+ * for the specified resource using the specified command.
+ *		     Result is returned as a NULL-terminated string in the
+ * pre-allocated area pointed to by @name with maximum
+ * capacity of @len bytes.
+ * @iter_response_init: A common helper to initialize a generic iterator to
+ *			parse multi-message responses: when run, the iterator
+ *			takes care of sending the initial command request, as
+ *			specified by @msg_id and @tx_size, and then parses the
+ *			multi-part responses using the custom operations
+ *			provided in @ops.
+ * @iter_response_run: A common helper to trigger the run of a previously
+ * initialized iterator.
+ */
+struct scmi_proto_helpers_ops {
+ int (*extended_name_get)(const struct scmi_protocol_handle *ph,
+ u8 cmd_id, u32 res_id, char *name, size_t len);
+ void *(*iter_response_init)(const struct scmi_protocol_handle *ph,
+ struct scmi_iterator_ops *ops,
+ unsigned int max_resources, u8 msg_id,
+ size_t tx_size, void *priv);
+ int (*iter_response_run)(void *iter);
+};
+
+/**
+ * struct scmi_xfer_ops - References to the core SCMI xfer operations.
+ * @version_get: Get the version of this protocol.
+ * @xfer_get_init: Initialize one struct xfer if any xfer slot is free.
+ * @reset_rx_to_maxsz: Reset rx size to max transport size.
+ * @do_xfer: Do the SCMI transfer.
+ * @do_xfer_with_response: Do the SCMI transfer waiting for a response.
+ * @xfer_put: Free the xfer slot.
+ *
+ * Note that all these operations expect a protocol handle as their first
+ * parameter; they then internally use it to infer the underlying protocol
+ * number: this way it is not possible for a protocol implementation to forge
+ * messages for another protocol.
+ */
+struct scmi_xfer_ops {
+ int (*version_get)(const struct scmi_protocol_handle *ph, u32 *version);
+ int (*xfer_get_init)(const struct scmi_protocol_handle *ph, u8 msg_id,
+ size_t tx_size, size_t rx_size,
+ struct scmi_xfer **p);
+ void (*reset_rx_to_maxsz)(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer);
+ int (*do_xfer)(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer);
+ int (*do_xfer_with_response)(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer);
+ void (*xfer_put)(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer);
+};
+
+typedef int (*scmi_prot_init_ph_fn_t)(const struct scmi_protocol_handle *);
+
+/**
+ * struct scmi_protocol - Protocol descriptor
+ * @id: Protocol ID.
+ * @owner: Module reference if any.
+ * @instance_init: Mandatory protocol initialization function.
+ * @instance_deinit: Optional protocol de-initialization function.
+ * @ops: Optional reference to the operations provided by the protocol and
+ * exposed in scmi_protocol.h.
+ * @events: An optional reference to the events supported by this protocol.
+ */
+struct scmi_protocol {
+ const u8 id;
+ struct module *owner;
+ const scmi_prot_init_ph_fn_t instance_init;
+ const scmi_prot_init_ph_fn_t instance_deinit;
+ const void *ops;
+ const struct scmi_protocol_events *events;
+};
+
+#define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(name, proto) \
+static const struct scmi_protocol *__this_proto = &(proto); \
+ \
+int __init scmi_##name##_register(void) \
+{ \
+ return scmi_protocol_register(__this_proto); \
+} \
+ \
+void __exit scmi_##name##_unregister(void) \
+{ \
+ scmi_protocol_unregister(__this_proto); \
+}
+
+#define DECLARE_SCMI_REGISTER_UNREGISTER(func) \
+ int __init scmi_##func##_register(void); \
+ void __exit scmi_##func##_unregister(void)
+DECLARE_SCMI_REGISTER_UNREGISTER(base);
+DECLARE_SCMI_REGISTER_UNREGISTER(clock);
+DECLARE_SCMI_REGISTER_UNREGISTER(perf);
+DECLARE_SCMI_REGISTER_UNREGISTER(power);
+DECLARE_SCMI_REGISTER_UNREGISTER(reset);
+DECLARE_SCMI_REGISTER_UNREGISTER(sensors);
+DECLARE_SCMI_REGISTER_UNREGISTER(voltage);
+DECLARE_SCMI_REGISTER_UNREGISTER(system);
+
+#endif /* _SCMI_PROTOCOLS_H */
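
As a quick worked example of the revision macros above: a version word of 0x00030000, as reported in a PROTOCOL_VERSION reply, decodes to major 3 and minor 0, which is exactly the threshold the extended-name paths test with PROTOCOL_REV_MAJOR(version) >= 0x3:

	u32 version = 0x00030000;	/* e.g. from a PROTOCOL_VERSION reply */

	u16 major = PROTOCOL_REV_MAJOR(version);	/* FIELD_GET(GENMASK(31, 16)) -> 3 */
	u16 minor = PROTOCOL_REV_MINOR(version);	/* FIELD_GET(GENMASK(15, 0)) -> 0 */
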
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
index 9bf2478ec6d1..a420a9102094 100644
--- a/drivers/firmware/arm_scmi/reset.c
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -2,7 +2,7 @@
/*
* System Control and Management Interface (SCMI) Reset Protocol
*
- * Copyright (C) 2019-2021 ARM Ltd.
+ * Copyright (C) 2019-2022 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications RESET - " fmt
@@ -10,13 +10,14 @@
#include <linux/module.h>
#include <linux/scmi_protocol.h>
-#include "common.h"
+#include "protocols.h"
#include "notify.h"
enum scmi_reset_protocol_cmd {
RESET_DOMAIN_ATTRIBUTES = 0x3,
RESET = 0x4,
RESET_NOTIFY = 0x5,
+ RESET_DOMAIN_NAME_GET = 0x6,
};
#define NUM_RESET_DOMAIN_MASK 0xffff
@@ -26,8 +27,9 @@ struct scmi_msg_resp_reset_domain_attributes {
__le32 attributes;
#define SUPPORTS_ASYNC_RESET(x) ((x) & BIT(31))
#define SUPPORTS_NOTIFY_RESET(x) ((x) & BIT(30))
+#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(29))
__le32 latency;
- u8 name[SCMI_MAX_STR_SIZE];
+ u8 name[SCMI_SHORT_NAME_MAX_SIZE];
};
struct scmi_msg_reset_domain_reset {
@@ -89,9 +91,11 @@ static int scmi_reset_attributes_get(const struct scmi_protocol_handle *ph,
static int
scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph,
- u32 domain, struct reset_dom_info *dom_info)
+ u32 domain, struct reset_dom_info *dom_info,
+ u32 version)
{
int ret;
+ u32 attributes;
struct scmi_xfer *t;
struct scmi_msg_resp_reset_domain_attributes *attr;
@@ -105,7 +109,7 @@ scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph,
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
- u32 attributes = le32_to_cpu(attr->attributes);
+ attributes = le32_to_cpu(attr->attributes);
dom_info->async_reset = SUPPORTS_ASYNC_RESET(attributes);
dom_info->reset_notify = SUPPORTS_NOTIFY_RESET(attributes);
@@ -116,6 +120,16 @@ scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph,
}
ph->xops->xfer_put(ph, t);
+
+ /*
+	 * If supported, overwrite the short name with the extended one;
+	 * on error just carry on and use the already provided short name.
+ */
+ if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
+ SUPPORTS_EXTENDED_NAMES(attributes))
+ ph->hops->extended_name_get(ph, RESET_DOMAIN_NAME_GET, domain,
+ dom_info->name, SCMI_MAX_STR_SIZE);
+
return ret;
}
@@ -126,8 +140,8 @@ static int scmi_reset_num_domains_get(const struct scmi_protocol_handle *ph)
return pi->num_domains;
}
-static char *scmi_reset_name_get(const struct scmi_protocol_handle *ph,
- u32 domain)
+static const char *
+scmi_reset_name_get(const struct scmi_protocol_handle *ph, u32 domain)
{
struct scmi_reset_info *pi = ph->get_priv(ph);
@@ -293,11 +307,13 @@ static const struct scmi_protocol_events reset_protocol_events = {
static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph)
{
- int domain;
+ int domain, ret;
u32 version;
struct scmi_reset_info *pinfo;
- ph->xops->version_get(ph, &version);
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
dev_dbg(ph->dev, "Reset Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
@@ -306,7 +322,9 @@ static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph)
if (!pinfo)
return -ENOMEM;
- scmi_reset_attributes_get(ph, pinfo);
+ ret = scmi_reset_attributes_get(ph, pinfo);
+ if (ret)
+ return ret;
pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
sizeof(*pinfo->dom_info), GFP_KERNEL);
@@ -316,7 +334,7 @@ static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph)
for (domain = 0; domain < pinfo->num_domains; domain++) {
struct reset_dom_info *dom = pinfo->dom_info + domain;
- scmi_reset_domain_attributes_get(ph, domain, dom);
+ scmi_reset_domain_attributes_get(ph, domain, dom, version);
}
pinfo->version = version;
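
The attribute-decoding macros used above are plain mask tests on the 32-bit attributes word returned by RESET_DOMAIN_ATTRIBUTES; schematically, with an attributes value taken from a reply:

	u32 attributes = le32_to_cpu(attr->attributes);

	bool async_reset = SUPPORTS_ASYNC_RESET(attributes);	/* BIT(31) */
	bool notify = SUPPORTS_NOTIFY_RESET(attributes);	/* BIT(30) */
	bool ext_names = SUPPORTS_EXTENDED_NAMES(attributes);	/* BIT(29) */
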
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index cdbb287bd8bc..21e0ce89b153 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -2,7 +2,7 @@
/*
* System Control and Management Interface (SCMI) Sensor Protocol
*
- * Copyright (C) 2018-2021 ARM Ltd.
+ * Copyright (C) 2018-2022 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications SENSOR - " fmt
@@ -11,7 +11,7 @@
#include <linux/module.h>
#include <linux/scmi_protocol.h>
-#include "common.h"
+#include "protocols.h"
#include "notify.h"
#define SCMI_MAX_NUM_SENSOR_AXIS 63
@@ -27,6 +27,8 @@ enum scmi_sensor_protocol_cmd {
SENSOR_CONFIG_GET = 0x9,
SENSOR_CONFIG_SET = 0xA,
SENSOR_CONTINUOUS_UPDATE_NOTIFY = 0xB,
+ SENSOR_NAME_GET = 0xC,
+ SENSOR_AXIS_NAME_GET = 0xD,
};
struct scmi_msg_resp_sensor_attributes {
@@ -63,6 +65,10 @@ struct scmi_msg_resp_attrs {
__le32 max_range_high;
};
+struct scmi_msg_sensor_description {
+ __le32 desc_index;
+};
+
struct scmi_msg_resp_sensor_description {
__le16 num_returned;
__le16 num_remaining;
@@ -71,6 +77,7 @@ struct scmi_msg_resp_sensor_description {
__le32 attributes_low;
/* Common attributes_low macros */
#define SUPPORTS_ASYNC_READ(x) FIELD_GET(BIT(31), (x))
+#define SUPPORTS_EXTENDED_NAMES(x) FIELD_GET(BIT(29), (x))
#define NUM_TRIP_POINTS(x) FIELD_GET(GENMASK(7, 0), (x))
__le32 attributes_high;
/* Common attributes_high macros */
@@ -78,7 +85,7 @@ struct scmi_msg_resp_sensor_description {
#define SENSOR_SCALE_SIGN BIT(4)
#define SENSOR_SCALE_EXTEND GENMASK(31, 5)
#define SENSOR_TYPE(x) FIELD_GET(GENMASK(7, 0), (x))
- u8 name[SCMI_MAX_STR_SIZE];
+ u8 name[SCMI_SHORT_NAME_MAX_SIZE];
/* only for version > 2.0 */
__le32 power;
__le32 resolution;
@@ -111,13 +118,22 @@ struct scmi_msg_resp_sensor_axis_description {
struct scmi_axis_descriptor {
__le32 id;
__le32 attributes_low;
+#define SUPPORTS_EXTENDED_AXIS_NAMES(x) FIELD_GET(BIT(9), (x))
__le32 attributes_high;
- u8 name[SCMI_MAX_STR_SIZE];
+ u8 name[SCMI_SHORT_NAME_MAX_SIZE];
__le32 resolution;
struct scmi_msg_resp_attrs attrs;
} desc[];
};
+struct scmi_msg_resp_sensor_axis_names_description {
+ __le32 num_axis_flags;
+ struct scmi_sensor_axis_name_descriptor {
+ __le32 axis_id;
+ u8 name[SCMI_MAX_STR_SIZE];
+ } desc[];
+};
+
/* Base scmi_axis_descriptor size excluding extended attrs after name */
#define SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ 28
@@ -231,335 +247,414 @@ static int scmi_sensor_attributes_get(const struct scmi_protocol_handle *ph,
}
static inline void scmi_parse_range_attrs(struct scmi_range_attrs *out,
- struct scmi_msg_resp_attrs *in)
+ const struct scmi_msg_resp_attrs *in)
{
out->min_range = get_unaligned_le64((void *)&in->min_range_low);
out->max_range = get_unaligned_le64((void *)&in->max_range_low);
}
-static int scmi_sensor_update_intervals(const struct scmi_protocol_handle *ph,
- struct scmi_sensor_info *s)
-{
- int ret, cnt;
- u32 desc_index = 0;
- u16 num_returned, num_remaining;
- struct scmi_xfer *ti;
- struct scmi_msg_resp_sensor_list_update_intervals *buf;
- struct scmi_msg_sensor_list_update_intervals *msg;
-
- ret = ph->xops->xfer_get_init(ph, SENSOR_LIST_UPDATE_INTERVALS,
- sizeof(*msg), 0, &ti);
- if (ret)
- return ret;
-
- buf = ti->rx.buf;
- do {
- u32 flags;
-
- msg = ti->tx.buf;
- /* Set the number of sensors to be skipped/already read */
- msg->id = cpu_to_le32(s->id);
- msg->index = cpu_to_le32(desc_index);
+struct scmi_sens_ipriv {
+ void *priv;
+ struct device *dev;
+};
- ret = ph->xops->do_xfer(ph, ti);
- if (ret)
- break;
+static void iter_intervals_prepare_message(void *message,
+ unsigned int desc_index,
+ const void *p)
+{
+ struct scmi_msg_sensor_list_update_intervals *msg = message;
+ const struct scmi_sensor_info *s;
- flags = le32_to_cpu(buf->num_intervals_flags);
- num_returned = NUM_INTERVALS_RETURNED(flags);
- num_remaining = NUM_INTERVALS_REMAINING(flags);
+ s = ((const struct scmi_sens_ipriv *)p)->priv;
+	/* Set the sensor ID and the number of intervals already read/to skip */
+ msg->id = cpu_to_le32(s->id);
+ msg->index = cpu_to_le32(desc_index);
+}
- /*
- * Max intervals is not declared previously anywhere so we
- * assume it's returned+remaining.
- */
- if (!s->intervals.count) {
- s->intervals.segmented = SEGMENTED_INTVL_FORMAT(flags);
- s->intervals.count = num_returned + num_remaining;
- /* segmented intervals are reported in one triplet */
- if (s->intervals.segmented &&
- (num_remaining || num_returned != 3)) {
- dev_err(ph->dev,
- "Sensor ID:%d advertises an invalid segmented interval (%d)\n",
- s->id, s->intervals.count);
+static int iter_intervals_update_state(struct scmi_iterator_state *st,
+ const void *response, void *p)
+{
+ u32 flags;
+ struct scmi_sensor_info *s = ((struct scmi_sens_ipriv *)p)->priv;
+ struct device *dev = ((struct scmi_sens_ipriv *)p)->dev;
+ const struct scmi_msg_resp_sensor_list_update_intervals *r = response;
+
+ flags = le32_to_cpu(r->num_intervals_flags);
+ st->num_returned = NUM_INTERVALS_RETURNED(flags);
+ st->num_remaining = NUM_INTERVALS_REMAINING(flags);
+
+ /*
+	 * The maximum number of intervals is not declared anywhere in
+	 * advance, so we assume it is returned+remaining on the first call.
+ */
+ if (!st->max_resources) {
+ s->intervals.segmented = SEGMENTED_INTVL_FORMAT(flags);
+ s->intervals.count = st->num_returned + st->num_remaining;
+ /* segmented intervals are reported in one triplet */
+ if (s->intervals.segmented &&
+ (st->num_remaining || st->num_returned != 3)) {
+ dev_err(dev,
+ "Sensor ID:%d advertises an invalid segmented interval (%d)\n",
+ s->id, s->intervals.count);
+ s->intervals.segmented = false;
+ s->intervals.count = 0;
+ return -EINVAL;
+ }
+ /* Direct allocation when exceeding pre-allocated */
+ if (s->intervals.count >= SCMI_MAX_PREALLOC_POOL) {
+ s->intervals.desc =
+ devm_kcalloc(dev,
+ s->intervals.count,
+ sizeof(*s->intervals.desc),
+ GFP_KERNEL);
+ if (!s->intervals.desc) {
s->intervals.segmented = false;
s->intervals.count = 0;
- ret = -EINVAL;
- break;
- }
- /* Direct allocation when exceeding pre-allocated */
- if (s->intervals.count >= SCMI_MAX_PREALLOC_POOL) {
- s->intervals.desc =
- devm_kcalloc(ph->dev,
- s->intervals.count,
- sizeof(*s->intervals.desc),
- GFP_KERNEL);
- if (!s->intervals.desc) {
- s->intervals.segmented = false;
- s->intervals.count = 0;
- ret = -ENOMEM;
- break;
- }
+ return -ENOMEM;
}
- } else if (desc_index + num_returned > s->intervals.count) {
- dev_err(ph->dev,
- "No. of update intervals can't exceed %d\n",
- s->intervals.count);
- ret = -EINVAL;
- break;
}
- for (cnt = 0; cnt < num_returned; cnt++)
- s->intervals.desc[desc_index + cnt] =
- le32_to_cpu(buf->intervals[cnt]);
+ st->max_resources = s->intervals.count;
+ }
+
+ return 0;
+}
- desc_index += num_returned;
+static int
+iter_intervals_process_response(const struct scmi_protocol_handle *ph,
+ const void *response,
+ struct scmi_iterator_state *st, void *p)
+{
+ const struct scmi_msg_resp_sensor_list_update_intervals *r = response;
+ struct scmi_sensor_info *s = ((struct scmi_sens_ipriv *)p)->priv;
- ph->xops->reset_rx_to_maxsz(ph, ti);
- /*
- * check for both returned and remaining to avoid infinite
- * loop due to buggy firmware
- */
- } while (num_returned && num_remaining);
+ s->intervals.desc[st->desc_index + st->loop_idx] =
+ le32_to_cpu(r->intervals[st->loop_idx]);
- ph->xops->xfer_put(ph, ti);
- return ret;
+ return 0;
}
-static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph,
+static int scmi_sensor_update_intervals(const struct scmi_protocol_handle *ph,
struct scmi_sensor_info *s)
{
- int ret, cnt;
- u32 desc_index = 0;
- u16 num_returned, num_remaining;
- struct scmi_xfer *te;
- struct scmi_msg_resp_sensor_axis_description *buf;
- struct scmi_msg_sensor_axis_description_get *msg;
+ void *iter;
+ struct scmi_msg_sensor_list_update_intervals *msg;
+ struct scmi_iterator_ops ops = {
+ .prepare_message = iter_intervals_prepare_message,
+ .update_state = iter_intervals_update_state,
+ .process_response = iter_intervals_process_response,
+ };
+ struct scmi_sens_ipriv upriv = {
+ .priv = s,
+ .dev = ph->dev,
+ };
+
+ iter = ph->hops->iter_response_init(ph, &ops, s->intervals.count,
+ SENSOR_LIST_UPDATE_INTERVALS,
+ sizeof(*msg), &upriv);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ return ph->hops->iter_response_run(iter);
+}
- s->axis = devm_kcalloc(ph->dev, s->num_axis,
- sizeof(*s->axis), GFP_KERNEL);
- if (!s->axis)
- return -ENOMEM;
+static void iter_axes_desc_prepare_message(void *message,
+ const unsigned int desc_index,
+ const void *priv)
+{
+ struct scmi_msg_sensor_axis_description_get *msg = message;
+ const struct scmi_sensor_info *s = priv;
- ret = ph->xops->xfer_get_init(ph, SENSOR_AXIS_DESCRIPTION_GET,
- sizeof(*msg), 0, &te);
- if (ret)
- return ret;
+	/* Set the sensor ID and the number of axes already read/to skip */
+ msg->id = cpu_to_le32(s->id);
+ msg->axis_desc_index = cpu_to_le32(desc_index);
+}
- buf = te->rx.buf;
- do {
- u32 flags;
- struct scmi_axis_descriptor *adesc;
+static int
+iter_axes_desc_update_state(struct scmi_iterator_state *st,
+ const void *response, void *priv)
+{
+ u32 flags;
+ const struct scmi_msg_resp_sensor_axis_description *r = response;
- msg = te->tx.buf;
- /* Set the number of sensors to be skipped/already read */
- msg->id = cpu_to_le32(s->id);
- msg->axis_desc_index = cpu_to_le32(desc_index);
+ flags = le32_to_cpu(r->num_axis_flags);
+ st->num_returned = NUM_AXIS_RETURNED(flags);
+ st->num_remaining = NUM_AXIS_REMAINING(flags);
+ st->priv = (void *)&r->desc[0];
- ret = ph->xops->do_xfer(ph, te);
- if (ret)
- break;
+ return 0;
+}
- flags = le32_to_cpu(buf->num_axis_flags);
- num_returned = NUM_AXIS_RETURNED(flags);
- num_remaining = NUM_AXIS_REMAINING(flags);
+static int
+iter_axes_desc_process_response(const struct scmi_protocol_handle *ph,
+ const void *response,
+ struct scmi_iterator_state *st, void *priv)
+{
+ u32 attrh, attrl;
+ struct scmi_sensor_axis_info *a;
+ size_t dsize = SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ;
+ struct scmi_sensor_info *s = priv;
+ const struct scmi_axis_descriptor *adesc = st->priv;
- if (desc_index + num_returned > s->num_axis) {
- dev_err(ph->dev, "No. of axis can't exceed %d\n",
- s->num_axis);
- break;
- }
+ attrl = le32_to_cpu(adesc->attributes_low);
- adesc = &buf->desc[0];
- for (cnt = 0; cnt < num_returned; cnt++) {
- u32 attrh, attrl;
- struct scmi_sensor_axis_info *a;
- size_t dsize = SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ;
+ a = &s->axis[st->desc_index + st->loop_idx];
+ a->id = le32_to_cpu(adesc->id);
+ a->extended_attrs = SUPPORTS_EXTEND_ATTRS(attrl);
- attrl = le32_to_cpu(adesc->attributes_low);
+ attrh = le32_to_cpu(adesc->attributes_high);
+ a->scale = S32_EXT(SENSOR_SCALE(attrh));
+ a->type = SENSOR_TYPE(attrh);
+ strscpy(a->name, adesc->name, SCMI_MAX_STR_SIZE);
- a = &s->axis[desc_index + cnt];
+ if (a->extended_attrs) {
+ unsigned int ares = le32_to_cpu(adesc->resolution);
- a->id = le32_to_cpu(adesc->id);
- a->extended_attrs = SUPPORTS_EXTEND_ATTRS(attrl);
+ a->resolution = SENSOR_RES(ares);
+ a->exponent = S32_EXT(SENSOR_RES_EXP(ares));
+ dsize += sizeof(adesc->resolution);
- attrh = le32_to_cpu(adesc->attributes_high);
- a->scale = S32_EXT(SENSOR_SCALE(attrh));
- a->type = SENSOR_TYPE(attrh);
- strlcpy(a->name, adesc->name, SCMI_MAX_STR_SIZE);
+ scmi_parse_range_attrs(&a->attrs, &adesc->attrs);
+ dsize += sizeof(adesc->attrs);
+ }
+ st->priv = ((u8 *)adesc + dsize);
+
+ return 0;
+}
- if (a->extended_attrs) {
- unsigned int ares =
- le32_to_cpu(adesc->resolution);
+static int
+iter_axes_extended_name_update_state(struct scmi_iterator_state *st,
+ const void *response, void *priv)
+{
+ u32 flags;
+ const struct scmi_msg_resp_sensor_axis_names_description *r = response;
- a->resolution = SENSOR_RES(ares);
- a->exponent =
- S32_EXT(SENSOR_RES_EXP(ares));
- dsize += sizeof(adesc->resolution);
+ flags = le32_to_cpu(r->num_axis_flags);
+ st->num_returned = NUM_AXIS_RETURNED(flags);
+ st->num_remaining = NUM_AXIS_REMAINING(flags);
+ st->priv = (void *)&r->desc[0];
- scmi_parse_range_attrs(&a->attrs,
- &adesc->attrs);
- dsize += sizeof(adesc->attrs);
- }
+ return 0;
+}
- adesc = (typeof(adesc))((u8 *)adesc + dsize);
- }
+static int
+iter_axes_extended_name_process_response(const struct scmi_protocol_handle *ph,
+ const void *response,
+ struct scmi_iterator_state *st,
+ void *priv)
+{
+ struct scmi_sensor_axis_info *a;
+ const struct scmi_sensor_info *s = priv;
+ struct scmi_sensor_axis_name_descriptor *adesc = st->priv;
- desc_index += num_returned;
+ a = &s->axis[st->desc_index + st->loop_idx];
+ strscpy(a->name, adesc->name, SCMI_MAX_STR_SIZE);
+ st->priv = ++adesc;
- ph->xops->reset_rx_to_maxsz(ph, te);
- /*
- * check for both returned and remaining to avoid infinite
- * loop due to buggy firmware
- */
- } while (num_returned && num_remaining);
+ return 0;
+}
- ph->xops->xfer_put(ph, te);
- return ret;
+static int
+scmi_sensor_axis_extended_names_get(const struct scmi_protocol_handle *ph,
+ struct scmi_sensor_info *s)
+{
+ void *iter;
+ struct scmi_msg_sensor_axis_description_get *msg;
+ struct scmi_iterator_ops ops = {
+ .prepare_message = iter_axes_desc_prepare_message,
+ .update_state = iter_axes_extended_name_update_state,
+ .process_response = iter_axes_extended_name_process_response,
+ };
+
+ iter = ph->hops->iter_response_init(ph, &ops, s->num_axis,
+ SENSOR_AXIS_NAME_GET,
+ sizeof(*msg), s);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ return ph->hops->iter_response_run(iter);
}
-static int scmi_sensor_description_get(const struct scmi_protocol_handle *ph,
- struct sensors_info *si)
+static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph,
+ struct scmi_sensor_info *s,
+ u32 version)
{
- int ret, cnt;
- u32 desc_index = 0;
- u16 num_returned, num_remaining;
- struct scmi_xfer *t;
- struct scmi_msg_resp_sensor_description *buf;
+ int ret;
+ void *iter;
+ struct scmi_msg_sensor_axis_description_get *msg;
+ struct scmi_iterator_ops ops = {
+ .prepare_message = iter_axes_desc_prepare_message,
+ .update_state = iter_axes_desc_update_state,
+ .process_response = iter_axes_desc_process_response,
+ };
- ret = ph->xops->xfer_get_init(ph, SENSOR_DESCRIPTION_GET,
- sizeof(__le32), 0, &t);
+ s->axis = devm_kcalloc(ph->dev, s->num_axis,
+ sizeof(*s->axis), GFP_KERNEL);
+ if (!s->axis)
+ return -ENOMEM;
+
+ iter = ph->hops->iter_response_init(ph, &ops, s->num_axis,
+ SENSOR_AXIS_DESCRIPTION_GET,
+ sizeof(*msg), s);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ ret = ph->hops->iter_response_run(iter);
if (ret)
return ret;
- buf = t->rx.buf;
-
- do {
- struct scmi_sensor_descriptor *sdesc;
+ if (PROTOCOL_REV_MAJOR(version) >= 0x3)
+ ret = scmi_sensor_axis_extended_names_get(ph, s);
- /* Set the number of sensors to be skipped/already read */
- put_unaligned_le32(desc_index, t->tx.buf);
+ return ret;
+}
- ret = ph->xops->do_xfer(ph, t);
- if (ret)
- break;
+static void iter_sens_descr_prepare_message(void *message,
+ unsigned int desc_index,
+ const void *priv)
+{
+ struct scmi_msg_sensor_description *msg = message;
- num_returned = le16_to_cpu(buf->num_returned);
- num_remaining = le16_to_cpu(buf->num_remaining);
+ msg->desc_index = cpu_to_le32(desc_index);
+}
- if (desc_index + num_returned > si->num_sensors) {
- dev_err(ph->dev, "No. of sensors can't exceed %d",
- si->num_sensors);
- break;
- }
+static int iter_sens_descr_update_state(struct scmi_iterator_state *st,
+ const void *response, void *priv)
+{
+ const struct scmi_msg_resp_sensor_description *r = response;
- sdesc = &buf->desc[0];
- for (cnt = 0; cnt < num_returned; cnt++) {
- u32 attrh, attrl;
- struct scmi_sensor_info *s;
- size_t dsize = SCMI_MSG_RESP_SENS_DESCR_BASE_SZ;
-
- s = &si->sensors[desc_index + cnt];
- s->id = le32_to_cpu(sdesc->id);
-
- attrl = le32_to_cpu(sdesc->attributes_low);
- /* common bitfields parsing */
- s->async = SUPPORTS_ASYNC_READ(attrl);
- s->num_trip_points = NUM_TRIP_POINTS(attrl);
- /**
- * only SCMIv3.0 specific bitfield below.
- * Such bitfields are assumed to be zeroed on non
- * relevant fw versions...assuming fw not buggy !
- */
- s->update = SUPPORTS_UPDATE_NOTIFY(attrl);
- s->timestamped = SUPPORTS_TIMESTAMP(attrl);
- if (s->timestamped)
- s->tstamp_scale =
- S32_EXT(SENSOR_TSTAMP_EXP(attrl));
- s->extended_scalar_attrs =
- SUPPORTS_EXTEND_ATTRS(attrl);
-
- attrh = le32_to_cpu(sdesc->attributes_high);
- /* common bitfields parsing */
- s->scale = S32_EXT(SENSOR_SCALE(attrh));
- s->type = SENSOR_TYPE(attrh);
- /* Use pre-allocated pool wherever possible */
- s->intervals.desc = s->intervals.prealloc_pool;
- if (si->version == SCMIv2_SENSOR_PROTOCOL) {
- s->intervals.segmented = false;
- s->intervals.count = 1;
- /*
- * Convert SCMIv2.0 update interval format to
- * SCMIv3.0 to be used as the common exposed
- * descriptor, accessible via common macros.
- */
- s->intervals.desc[0] =
- (SENSOR_UPDATE_BASE(attrh) << 5) |
- SENSOR_UPDATE_SCALE(attrh);
- } else {
- /*
- * From SCMIv3.0 update intervals are retrieved
- * via a dedicated (optional) command.
- * Since the command is optional, on error carry
- * on without any update interval.
- */
- if (scmi_sensor_update_intervals(ph, s))
- dev_dbg(ph->dev,
- "Update Intervals not available for sensor ID:%d\n",
- s->id);
- }
- /**
- * only > SCMIv2.0 specific bitfield below.
- * Such bitfields are assumed to be zeroed on non
- * relevant fw versions...assuming fw not buggy !
- */
- s->num_axis = min_t(unsigned int,
- SUPPORTS_AXIS(attrh) ?
- SENSOR_AXIS_NUMBER(attrh) : 0,
- SCMI_MAX_NUM_SENSOR_AXIS);
- strlcpy(s->name, sdesc->name, SCMI_MAX_STR_SIZE);
-
- if (s->extended_scalar_attrs) {
- s->sensor_power = le32_to_cpu(sdesc->power);
- dsize += sizeof(sdesc->power);
- /* Only for sensors reporting scalar values */
- if (s->num_axis == 0) {
- unsigned int sres =
- le32_to_cpu(sdesc->resolution);
-
- s->resolution = SENSOR_RES(sres);
- s->exponent =
- S32_EXT(SENSOR_RES_EXP(sres));
- dsize += sizeof(sdesc->resolution);
-
- scmi_parse_range_attrs(&s->scalar_attrs,
- &sdesc->scalar_attrs);
- dsize += sizeof(sdesc->scalar_attrs);
- }
- }
- if (s->num_axis > 0) {
- ret = scmi_sensor_axis_description(ph, s);
- if (ret)
- goto out;
- }
+ st->num_returned = le16_to_cpu(r->num_returned);
+ st->num_remaining = le16_to_cpu(r->num_remaining);
+ st->priv = (void *)&r->desc[0];
- sdesc = (typeof(sdesc))((u8 *)sdesc + dsize);
- }
+ return 0;
+}
- desc_index += num_returned;
+static int
+iter_sens_descr_process_response(const struct scmi_protocol_handle *ph,
+ const void *response,
+ struct scmi_iterator_state *st, void *priv)
- ph->xops->reset_rx_to_maxsz(ph, t);
+{
+ int ret = 0;
+ u32 attrh, attrl;
+ size_t dsize = SCMI_MSG_RESP_SENS_DESCR_BASE_SZ;
+ struct scmi_sensor_info *s;
+ struct sensors_info *si = priv;
+ const struct scmi_sensor_descriptor *sdesc = st->priv;
+
+ s = &si->sensors[st->desc_index + st->loop_idx];
+ s->id = le32_to_cpu(sdesc->id);
+
+ attrl = le32_to_cpu(sdesc->attributes_low);
+ /* common bitfields parsing */
+ s->async = SUPPORTS_ASYNC_READ(attrl);
+ s->num_trip_points = NUM_TRIP_POINTS(attrl);
+	/*
+	 * Only SCMIv3.0-specific bitfields below.
+	 * Such bitfields are assumed to be zeroed on non-relevant
+	 * fw versions... assuming the fw is not buggy!
+	 */
+ s->update = SUPPORTS_UPDATE_NOTIFY(attrl);
+ s->timestamped = SUPPORTS_TIMESTAMP(attrl);
+ if (s->timestamped)
+ s->tstamp_scale = S32_EXT(SENSOR_TSTAMP_EXP(attrl));
+ s->extended_scalar_attrs = SUPPORTS_EXTEND_ATTRS(attrl);
+
+ attrh = le32_to_cpu(sdesc->attributes_high);
+ /* common bitfields parsing */
+ s->scale = S32_EXT(SENSOR_SCALE(attrh));
+ s->type = SENSOR_TYPE(attrh);
+ /* Use pre-allocated pool wherever possible */
+ s->intervals.desc = s->intervals.prealloc_pool;
+ if (si->version == SCMIv2_SENSOR_PROTOCOL) {
+ s->intervals.segmented = false;
+ s->intervals.count = 1;
+ /*
+ * Convert SCMIv2.0 update interval format to
+ * SCMIv3.0 to be used as the common exposed
+ * descriptor, accessible via common macros.
+ */
+ s->intervals.desc[0] = (SENSOR_UPDATE_BASE(attrh) << 5) |
+ SENSOR_UPDATE_SCALE(attrh);
+ } else {
/*
- * check for both returned and remaining to avoid infinite
- * loop due to buggy firmware
+ * From SCMIv3.0 update intervals are retrieved
+ * via a dedicated (optional) command.
+ * Since the command is optional, on error carry
+ * on without any update interval.
*/
- } while (num_returned && num_remaining);
+ if (scmi_sensor_update_intervals(ph, s))
+ dev_dbg(ph->dev,
+ "Update Intervals not available for sensor ID:%d\n",
+ s->id);
+ }
+	/*
+	 * Only > SCMIv2.0-specific bitfields below.
+	 * Such bitfields are assumed to be zeroed on non-relevant
+	 * fw versions... assuming the fw is not buggy!
+	 */
+ s->num_axis = min_t(unsigned int,
+ SUPPORTS_AXIS(attrh) ?
+ SENSOR_AXIS_NUMBER(attrh) : 0,
+ SCMI_MAX_NUM_SENSOR_AXIS);
+ strscpy(s->name, sdesc->name, SCMI_MAX_STR_SIZE);
+
+ /*
+	 * If supported, overwrite the short name with the extended
+	 * one; on error just carry on and use the already provided
+	 * short name.
+ */
+ if (PROTOCOL_REV_MAJOR(si->version) >= 0x3 &&
+ SUPPORTS_EXTENDED_NAMES(attrl))
+ ph->hops->extended_name_get(ph, SENSOR_NAME_GET, s->id,
+ s->name, SCMI_MAX_STR_SIZE);
+
+ if (s->extended_scalar_attrs) {
+ s->sensor_power = le32_to_cpu(sdesc->power);
+ dsize += sizeof(sdesc->power);
+
+ /* Only for sensors reporting scalar values */
+ if (s->num_axis == 0) {
+ unsigned int sres = le32_to_cpu(sdesc->resolution);
+
+ s->resolution = SENSOR_RES(sres);
+ s->exponent = S32_EXT(SENSOR_RES_EXP(sres));
+ dsize += sizeof(sdesc->resolution);
+
+ scmi_parse_range_attrs(&s->scalar_attrs,
+ &sdesc->scalar_attrs);
+ dsize += sizeof(sdesc->scalar_attrs);
+ }
+ }
+
+ if (s->num_axis > 0)
+ ret = scmi_sensor_axis_description(ph, s, si->version);
+
+ st->priv = ((u8 *)sdesc + dsize);
-out:
- ph->xops->xfer_put(ph, t);
return ret;
}
+static int scmi_sensor_description_get(const struct scmi_protocol_handle *ph,
+ struct sensors_info *si)
+{
+ void *iter;
+ struct scmi_iterator_ops ops = {
+ .prepare_message = iter_sens_descr_prepare_message,
+ .update_state = iter_sens_descr_update_state,
+ .process_response = iter_sens_descr_process_response,
+ };
+
+ iter = ph->hops->iter_response_init(ph, &ops, si->num_sensors,
+ SENSOR_DESCRIPTION_GET,
+ sizeof(__le32), si);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ return ph->hops->iter_response_run(iter);
+}
+
static inline int
scmi_sensor_request_notify(const struct scmi_protocol_handle *ph, u32 sensor_id,
u8 message_id, bool enable)
@@ -966,7 +1061,9 @@ static int scmi_sensors_protocol_init(const struct scmi_protocol_handle *ph)
int ret;
struct sensors_info *sinfo;
- ph->xops->version_get(ph, &version);
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
dev_dbg(ph->dev, "Sensor Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
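
One subtlety in the sensors conversion above: DESCRIPTION_GET replies carry variable-sized, packed descriptors, so the process_response callback cannot index into an array; instead it advances the opaque st->priv cursor by the computed descriptor size. Schematically, condensed from iter_sens_descr_process_response() above:

	const struct scmi_sensor_descriptor *sdesc = st->priv;
	size_t dsize = SCMI_MSG_RESP_SENS_DESCR_BASE_SZ;

	/* ... parse the fixed part of the descriptor ... */
	if (s->extended_scalar_attrs)
		dsize += sizeof(sdesc->power);	/* plus any optional trailing fields */

	st->priv = (u8 *)sdesc + dsize;	/* step to the next packed descriptor */
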
diff --git a/drivers/firmware/arm_scmi/system.c b/drivers/firmware/arm_scmi/system.c
index e5175ef73b40..220e399118ad 100644
--- a/drivers/firmware/arm_scmi/system.c
+++ b/drivers/firmware/arm_scmi/system.c
@@ -2,7 +2,7 @@
/*
* System Control and Management Interface (SCMI) System Power Protocol
*
- * Copyright (C) 2020-2021 ARM Ltd.
+ * Copyright (C) 2020-2022 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications SYSTEM - " fmt
@@ -10,7 +10,7 @@
#include <linux/module.h>
#include <linux/scmi_protocol.h>
-#include "common.h"
+#include "protocols.h"
#include "notify.h"
#define SCMI_SYSTEM_NUM_SOURCES 1
@@ -113,10 +113,13 @@ static const struct scmi_protocol_events system_protocol_events = {
static int scmi_system_protocol_init(const struct scmi_protocol_handle *ph)
{
+ int ret;
u32 version;
struct scmi_system_info *pinfo;
- ph->xops->version_get(ph, &version);
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
dev_dbg(ph->dev, "System Power Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c
index ac08e819088b..9d195d8719ab 100644
--- a/drivers/firmware/arm_scmi/voltage.c
+++ b/drivers/firmware/arm_scmi/voltage.c
@@ -2,13 +2,13 @@
/*
* System Control and Management Interface (SCMI) Voltage Protocol
*
- * Copyright (C) 2020-2021 ARM Ltd.
+ * Copyright (C) 2020-2022 ARM Ltd.
*/
#include <linux/module.h>
#include <linux/scmi_protocol.h>
-#include "common.h"
+#include "protocols.h"
#define VOLTAGE_DOMS_NUM_MASK GENMASK(15, 0)
#define REMAINING_LEVELS_MASK GENMASK(31, 16)
@@ -21,13 +21,16 @@ enum scmi_voltage_protocol_cmd {
VOLTAGE_CONFIG_GET = 0x6,
VOLTAGE_LEVEL_SET = 0x7,
VOLTAGE_LEVEL_GET = 0x8,
+ VOLTAGE_DOMAIN_NAME_GET = 0x09,
};
#define NUM_VOLTAGE_DOMAINS(x) ((u16)(FIELD_GET(VOLTAGE_DOMS_NUM_MASK, (x))))
struct scmi_msg_resp_domain_attributes {
__le32 attr;
- u8 name[SCMI_MAX_STR_SIZE];
+#define SUPPORTS_ASYNC_LEVEL_SET(x) ((x) & BIT(31))
+#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(30))
+ u8 name[SCMI_SHORT_NAME_MAX_SIZE];
};
struct scmi_msg_cmd_describe_levels {
@@ -54,6 +57,11 @@ struct scmi_msg_cmd_level_set {
__le32 voltage_level;
};
+struct scmi_resp_voltage_level_set_complete {
+ __le32 domain_id;
+ __le32 voltage_level;
+};
+
struct voltage_info {
unsigned int version;
unsigned int num_domains;
@@ -110,14 +118,100 @@ static int scmi_init_voltage_levels(struct device *dev,
return 0;
}
+struct scmi_volt_ipriv {
+ struct device *dev;
+ struct scmi_voltage_info *v;
+};
+
+static void iter_volt_levels_prepare_message(void *message,
+ unsigned int desc_index,
+ const void *priv)
+{
+ struct scmi_msg_cmd_describe_levels *msg = message;
+ const struct scmi_volt_ipriv *p = priv;
+
+ msg->domain_id = cpu_to_le32(p->v->id);
+ msg->level_index = cpu_to_le32(desc_index);
+}
+
+static int iter_volt_levels_update_state(struct scmi_iterator_state *st,
+ const void *response, void *priv)
+{
+ int ret = 0;
+ u32 flags;
+ const struct scmi_msg_resp_describe_levels *r = response;
+ struct scmi_volt_ipriv *p = priv;
+
+ flags = le32_to_cpu(r->flags);
+ st->num_returned = NUM_RETURNED_LEVELS(flags);
+ st->num_remaining = NUM_REMAINING_LEVELS(flags);
+
+ /* Allocate space for num_levels if not already done */
+ if (!p->v->num_levels) {
+ ret = scmi_init_voltage_levels(p->dev, p->v, st->num_returned,
+ st->num_remaining,
+ SUPPORTS_SEGMENTED_LEVELS(flags));
+ if (!ret)
+ st->max_resources = p->v->num_levels;
+ }
+
+ return ret;
+}
+
+static int
+iter_volt_levels_process_response(const struct scmi_protocol_handle *ph,
+ const void *response,
+ struct scmi_iterator_state *st, void *priv)
+{
+ s32 val;
+ const struct scmi_msg_resp_describe_levels *r = response;
+ struct scmi_volt_ipriv *p = priv;
+
+ val = (s32)le32_to_cpu(r->voltage[st->loop_idx]);
+ p->v->levels_uv[st->desc_index + st->loop_idx] = val;
+ if (val < 0)
+ p->v->negative_volts_allowed = true;
+
+ return 0;
+}
+
+static int scmi_voltage_levels_get(const struct scmi_protocol_handle *ph,
+ struct scmi_voltage_info *v)
+{
+ int ret;
+ void *iter;
+ struct scmi_msg_cmd_describe_levels *msg;
+ struct scmi_iterator_ops ops = {
+ .prepare_message = iter_volt_levels_prepare_message,
+ .update_state = iter_volt_levels_update_state,
+ .process_response = iter_volt_levels_process_response,
+ };
+ struct scmi_volt_ipriv vpriv = {
+ .dev = ph->dev,
+ .v = v,
+ };
+
+ iter = ph->hops->iter_response_init(ph, &ops, v->num_levels,
+ VOLTAGE_DESCRIBE_LEVELS,
+ sizeof(*msg), &vpriv);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ ret = ph->hops->iter_response_run(iter);
+ if (ret) {
+ v->num_levels = 0;
+ devm_kfree(ph->dev, v->levels_uv);
+ }
+
+ return ret;
+}
+
static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
struct voltage_info *vinfo)
{
int ret, dom;
- struct scmi_xfer *td, *tl;
- struct device *dev = ph->dev;
+ struct scmi_xfer *td;
struct scmi_msg_resp_domain_attributes *resp_dom;
- struct scmi_msg_resp_describe_levels *resp_levels;
ret = ph->xops->xfer_get_init(ph, VOLTAGE_DOMAIN_ATTRIBUTES,
sizeof(__le32), sizeof(*resp_dom), &td);
@@ -125,16 +219,8 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
return ret;
resp_dom = td->rx.buf;
- ret = ph->xops->xfer_get_init(ph, VOLTAGE_DESCRIBE_LEVELS,
- sizeof(__le64), 0, &tl);
- if (ret)
- goto outd;
- resp_levels = tl->rx.buf;
-
for (dom = 0; dom < vinfo->num_domains; dom++) {
- u32 desc_index = 0;
- u16 num_returned = 0, num_remaining = 0;
- struct scmi_msg_cmd_describe_levels *cmd;
+ u32 attributes;
struct scmi_voltage_info *v;
/* Retrieve domain attributes at first ... */
@@ -146,69 +232,31 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
v = vinfo->domains + dom;
v->id = dom;
- v->attributes = le32_to_cpu(resp_dom->attr);
+ attributes = le32_to_cpu(resp_dom->attr);
strlcpy(v->name, resp_dom->name, SCMI_MAX_STR_SIZE);
- cmd = tl->tx.buf;
- /* ...then retrieve domain levels descriptions */
- do {
- u32 flags;
- int cnt;
-
- cmd->domain_id = cpu_to_le32(v->id);
- cmd->level_index = cpu_to_le32(desc_index);
- ret = ph->xops->do_xfer(ph, tl);
- if (ret)
- break;
-
- flags = le32_to_cpu(resp_levels->flags);
- num_returned = NUM_RETURNED_LEVELS(flags);
- num_remaining = NUM_REMAINING_LEVELS(flags);
-
- /* Allocate space for num_levels if not already done */
- if (!v->num_levels) {
- ret = scmi_init_voltage_levels(dev, v,
- num_returned,
- num_remaining,
- SUPPORTS_SEGMENTED_LEVELS(flags));
- if (ret)
- break;
- }
-
- if (desc_index + num_returned > v->num_levels) {
- dev_err(ph->dev,
- "No. of voltage levels can't exceed %d\n",
- v->num_levels);
- ret = -EINVAL;
- break;
- }
-
- for (cnt = 0; cnt < num_returned; cnt++) {
- s32 val;
-
- val =
- (s32)le32_to_cpu(resp_levels->voltage[cnt]);
- v->levels_uv[desc_index + cnt] = val;
- if (val < 0)
- v->negative_volts_allowed = true;
- }
-
- desc_index += num_returned;
-
- ph->xops->reset_rx_to_maxsz(ph, tl);
- /* check both to avoid infinite loop due to buggy fw */
- } while (num_returned && num_remaining);
-
- if (ret) {
- v->num_levels = 0;
- devm_kfree(dev, v->levels_uv);
+ /*
+ * If supported, overwrite the short name with the extended one;
+ * on error, just carry on and use the already provided short name.
+ */
+ if (PROTOCOL_REV_MAJOR(vinfo->version) >= 0x2) {
+ if (SUPPORTS_EXTENDED_NAMES(attributes))
+ ph->hops->extended_name_get(ph,
+ VOLTAGE_DOMAIN_NAME_GET,
+ v->id, v->name,
+ SCMI_MAX_STR_SIZE);
+ if (SUPPORTS_ASYNC_LEVEL_SET(attributes))
+ v->async_level_set = true;
}
+ ret = scmi_voltage_levels_get(ph, v);
+ /* Skip invalid voltage descriptors */
+ if (ret)
+ continue;
+
ph->xops->reset_rx_to_maxsz(ph, td);
}
- ph->xops->xfer_put(ph, tl);
-outd:
ph->xops->xfer_put(ph, td);
return ret;
@@ -271,12 +319,15 @@ static int scmi_voltage_config_get(const struct scmi_protocol_handle *ph,
}
static int scmi_voltage_level_set(const struct scmi_protocol_handle *ph,
- u32 domain_id, u32 flags, s32 volt_uV)
+ u32 domain_id,
+ enum scmi_voltage_level_mode mode,
+ s32 volt_uV)
{
int ret;
struct scmi_xfer *t;
struct voltage_info *vinfo = ph->get_priv(ph);
struct scmi_msg_cmd_level_set *cmd;
+ struct scmi_voltage_info *v;
if (domain_id >= vinfo->num_domains)
return -EINVAL;
@@ -286,12 +337,31 @@ static int scmi_voltage_level_set(const struct scmi_protocol_handle *ph,
if (ret)
return ret;
+ v = vinfo->domains + domain_id;
+
cmd = t->tx.buf;
cmd->domain_id = cpu_to_le32(domain_id);
- cmd->flags = cpu_to_le32(flags);
cmd->voltage_level = cpu_to_le32(volt_uV);
- ret = ph->xops->do_xfer(ph, t);
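+ /*
+ * Flags bit 0 requests an asynchronous level set: use it only
+ * when the domain advertised async support and the caller asked
+ * for AUTO mode; the delayed response is then checked to confirm
+ * the domain and the applied level.
+ */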
+ if (!v->async_level_set || mode != SCMI_VOLTAGE_LEVEL_SET_AUTO) {
+ cmd->flags = cpu_to_le32(0x0);
+ ret = ph->xops->do_xfer(ph, t);
+ } else {
+ cmd->flags = cpu_to_le32(0x1);
+ ret = ph->xops->do_xfer_with_response(ph, t);
+ if (!ret) {
+ struct scmi_resp_voltage_level_set_complete *resp;
+
+ resp = t->rx.buf;
+ if (le32_to_cpu(resp->domain_id) == domain_id)
+ dev_dbg(ph->dev,
+ "Voltage domain %d set async to %d\n",
+ v->id,
+ le32_to_cpu(resp->voltage_level));
+ else
+ ret = -EPROTO;
+ }
+ }
ph->xops->xfer_put(ph, t);
return ret;
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 491bbf70c94a..3163660fa8e2 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -1379,6 +1379,10 @@ static const struct of_device_id qcom_scm_dt_match[] = {
SCM_HAS_IFACE_CLK |
SCM_HAS_BUS_CLK)
},
+ { .compatible = "qcom,scm-msm8976", .data = (void *)(SCM_HAS_CORE_CLK |
+ SCM_HAS_IFACE_CLK |
+ SCM_HAS_BUS_CLK)
+ },
{ .compatible = "qcom,scm-msm8994" },
{ .compatible = "qcom,scm-msm8996" },
{ .compatible = "qcom,scm" },
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 4697edc125b1..ebc32bbd9b83 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -2,7 +2,7 @@
/*
* Texas Instruments System Control Interface Protocol Driver
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon
*/
@@ -12,6 +12,7 @@
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
@@ -96,6 +97,7 @@ struct ti_sci_desc {
* @node: list head
* @host_id: Host ID
* @users: Number of users of this instance
+ * @is_suspending: Flag set to indicate that we are in the suspend path.
*/
struct ti_sci_info {
struct device *dev;
@@ -114,7 +116,7 @@ struct ti_sci_info {
u8 host_id;
/* protected by ti_sci_list_mutex */
int users;
-
+ bool is_suspending;
};
#define cl_to_ti_sci_info(c) container_of(c, struct ti_sci_info, cl)
@@ -349,6 +351,8 @@ static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
xfer->tx_message.len = tx_message_size;
+ xfer->tx_message.chan_rx = info->chan_rx;
+ xfer->tx_message.timeout_rx_ms = info->desc->max_rx_timeout_ms;
xfer->rx_len = (u8)rx_message_size;
reinit_completion(&xfer->done);
@@ -406,6 +410,7 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info,
int ret;
int timeout;
struct device *dev = info->dev;
+ bool done_state = true;
ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
if (ret < 0)
@@ -413,13 +418,27 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info,
ret = 0;
- /* And we wait for the response. */
- timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
- if (!wait_for_completion_timeout(&xfer->done, timeout)) {
+ if (!info->is_suspending) {
+ /* And we wait for the response. */
+ timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
+ if (!wait_for_completion_timeout(&xfer->done, timeout))
+ ret = -ETIMEDOUT;
+ } else {
+ /*
+ * If we are suspending, we cannot use wait_for_completion_timeout
+ * during noirq phase, so we must manually poll the completion.
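+ * read_poll_timeout_atomic() busy-polls the given op (here
+ * try_wait_for_completion()) every 1us until it returns true
+ * or max_rx_timeout_ms elapses, without sleeping.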
+ */
+ ret = read_poll_timeout_atomic(try_wait_for_completion, done_state,
+ true, 1,
+ info->desc->max_rx_timeout_ms * 1000,
+ false, &xfer->done);
+ }
+
+ if (ret == -ETIMEDOUT || !done_state) {
dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
(void *)_RET_IP_);
- ret = -ETIMEDOUT;
}
+
/*
* NOTE: we might prefer not to need the mailbox ticker to manage the
* transfer queueing since the protocol layer queues things by itself.
@@ -3264,6 +3283,35 @@ static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
return NOTIFY_BAD;
}
+static void ti_sci_set_is_suspending(struct ti_sci_info *info, bool is_suspending)
+{
+ info->is_suspending = is_suspending;
+}
+
+static int ti_sci_suspend(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+ /*
+ * We must switch operation to polled mode now as drivers and the genpd
+ * layer may make late TI SCI calls to change clock and device states
+ * from the noirq phase of suspend.
+ */
+ ti_sci_set_is_suspending(info, true);
+
+ return 0;
+}
+
+static int ti_sci_resume(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+
+ ti_sci_set_is_suspending(info, false);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(ti_sci_pm_ops, ti_sci_suspend, ti_sci_resume);
+
/* Description for K2G */
static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
.default_host_id = 2,
@@ -3472,6 +3520,7 @@ static struct platform_driver ti_sci_driver = {
.driver = {
.name = "ti-sci",
.of_match_table = of_match_ptr(ti_sci_of_match),
+ .pm = &ti_sci_pm_ops,
},
};
module_platform_driver(ti_sci_driver);
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 30bff6cb1b8d..b7800b37af78 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -103,7 +103,7 @@ config TI_EMIF
temperature changes
config OMAP_GPMC
- bool "Texas Instruments OMAP SoC GPMC driver" if COMPILE_TEST
+ tristate "Texas Instruments OMAP SoC GPMC driver"
depends on OF_ADDRESS
select GPIOLIB
help
diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c
index 14412002775d..76c82e9c8fce 100644
--- a/drivers/memory/brcmstb_dpfe.c
+++ b/drivers/memory/brcmstb_dpfe.c
@@ -857,7 +857,6 @@ static int brcmstb_dpfe_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct brcmstb_dpfe_priv *priv;
- struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -869,22 +868,19 @@ static int brcmstb_dpfe_probe(struct platform_device *pdev)
mutex_init(&priv->lock);
platform_set_drvdata(pdev, priv);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-cpu");
- priv->regs = devm_ioremap_resource(dev, res);
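+ /*
+ * devm_platform_ioremap_resource_byname() is shorthand for the
+ * former platform_get_resource_byname() + devm_ioremap_resource()
+ * pair used in these conversions.
+ */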
+ priv->regs = devm_platform_ioremap_resource_byname(pdev, "dpfe-cpu");
if (IS_ERR(priv->regs)) {
dev_err(dev, "couldn't map DCPU registers\n");
return -ENODEV;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-dmem");
- priv->dmem = devm_ioremap_resource(dev, res);
+ priv->dmem = devm_platform_ioremap_resource_byname(pdev, "dpfe-dmem");
if (IS_ERR(priv->dmem)) {
dev_err(dev, "Couldn't map DCPU data memory\n");
return -ENOENT;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-imem");
- priv->imem = devm_ioremap_resource(dev, res);
+ priv->imem = devm_platform_ioremap_resource_byname(pdev, "dpfe-imem");
if (IS_ERR(priv->imem)) {
dev_err(dev, "Couldn't map DCPU instruction memory\n");
return -ENOENT;
diff --git a/drivers/memory/da8xx-ddrctl.c b/drivers/memory/da8xx-ddrctl.c
index 872addd0ec60..b32005bf269c 100644
--- a/drivers/memory/da8xx-ddrctl.c
+++ b/drivers/memory/da8xx-ddrctl.c
@@ -115,8 +115,7 @@ static int da8xx_ddrctl_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ddrctl = devm_ioremap_resource(dev, res);
+ ddrctl = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(ddrctl)) {
dev_err(dev, "unable to map memory controller registers\n");
return PTR_ERR(ddrctl);
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
index ecc78d6f89ed..6c2a421b86e3 100644
--- a/drivers/memory/emif.c
+++ b/drivers/memory/emif.c
@@ -1025,10 +1025,8 @@ static struct emif_data *__init_or_module get_device_details(
temp = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL);
- if (!emif || !temp || !dev_info) {
- dev_err(dev, "%s:%d: allocation error\n", __func__, __LINE__);
+ if (!emif || !temp || !dev_info)
goto error;
- }
memcpy(temp, pd, sizeof(*pd));
pd = temp;
@@ -1067,9 +1065,6 @@ static struct emif_data *__init_or_module get_device_details(
temp = devm_kzalloc(dev, sizeof(*cust_cfgs), GFP_KERNEL);
if (temp)
memcpy(temp, cust_cfgs, sizeof(*cust_cfgs));
- else
- dev_warn(dev, "%s:%d: allocation error\n", __func__,
- __LINE__);
pd->custom_configs = temp;
}
@@ -1084,8 +1079,6 @@ static struct emif_data *__init_or_module get_device_details(
memcpy(temp, pd->timings, size);
pd->timings = temp;
} else {
- dev_warn(dev, "%s:%d: allocation error\n", __func__,
- __LINE__);
get_default_timings(emif);
}
} else {
@@ -1098,8 +1091,6 @@ static struct emif_data *__init_or_module get_device_details(
memcpy(temp, pd->min_tck, sizeof(*pd->min_tck));
pd->min_tck = temp;
} else {
- dev_warn(dev, "%s:%d: allocation error\n", __func__,
- __LINE__);
pd->min_tck = &lpddr2_jedec_min_tck;
}
} else {
@@ -1116,7 +1107,6 @@ error:
static int __init_or_module emif_probe(struct platform_device *pdev)
{
struct emif_data *emif;
- struct resource *res;
int irq, ret;
if (pdev->dev.of_node)
@@ -1135,8 +1125,7 @@ static int __init_or_module emif_probe(struct platform_device *pdev)
emif->dev = &pdev->dev;
platform_set_drvdata(pdev, emif);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- emif->base = devm_ioremap_resource(emif->dev, res);
+ emif->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(emif->base))
goto error;
diff --git a/drivers/memory/fsl-corenet-cf.c b/drivers/memory/fsl-corenet-cf.c
index f8ea592c9cb5..7fc9f57ae278 100644
--- a/drivers/memory/fsl-corenet-cf.c
+++ b/drivers/memory/fsl-corenet-cf.c
@@ -172,7 +172,6 @@ out:
static int ccf_probe(struct platform_device *pdev)
{
struct ccf_private *ccf;
- struct resource *r;
const struct of_device_id *match;
u32 errinten;
int ret, irq;
@@ -185,13 +184,7 @@ static int ccf_probe(struct platform_device *pdev)
if (!ccf)
return -ENOMEM;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "%s: no mem resource\n", __func__);
- return -ENXIO;
- }
-
- ccf->regs = devm_ioremap_resource(&pdev->dev, r);
+ ccf->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ccf->regs))
return PTR_ERR(ccf->regs);
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index ed11887c1b7c..2351f2708da2 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -12,6 +12,7 @@
#include <linux/cpu_pm.h>
#include <linux/irq.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
@@ -1889,16 +1890,6 @@ int gpmc_cs_program_settings(int cs, struct gpmc_settings *p)
}
#ifdef CONFIG_OF
-static const struct of_device_id gpmc_dt_ids[] = {
- { .compatible = "ti,omap2420-gpmc" },
- { .compatible = "ti,omap2430-gpmc" },
- { .compatible = "ti,omap3430-gpmc" }, /* omap3430 & omap3630 */
- { .compatible = "ti,omap4430-gpmc" }, /* omap4430 & omap4460 & omap543x */
- { .compatible = "ti,am3352-gpmc" }, /* am335x devices */
- { .compatible = "ti,am64-gpmc" },
- { }
-};
-
static void gpmc_cs_set_name(int cs, const char *name)
{
struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
@@ -2257,11 +2248,9 @@ no_timings:
if (!of_platform_device_create(child, NULL, &pdev->dev))
goto err_child_fail;
- /* is child a common bus? */
- if (of_match_node(of_default_bus_match_table, child))
- /* create children and other common bus children */
- if (of_platform_default_populate(child, NULL, &pdev->dev))
- goto err_child_fail;
+ /* create children and other common bus children */
+ if (of_platform_default_populate(child, NULL, &pdev->dev))
+ goto err_child_fail;
return 0;
@@ -2278,6 +2267,8 @@ err:
return ret;
}
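+
+/*
+ * Forward declaration: the table definition now lives next to
+ * MODULE_DEVICE_TABLE() further down, as part of making the
+ * driver buildable as a module.
+ */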
+static const struct of_device_id gpmc_dt_ids[];
+
static int gpmc_probe_dt(struct platform_device *pdev)
{
int ret;
@@ -2644,6 +2635,19 @@ static int gpmc_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(gpmc_pm_ops, gpmc_suspend, gpmc_resume);
+#ifdef CONFIG_OF
+static const struct of_device_id gpmc_dt_ids[] = {
+ { .compatible = "ti,omap2420-gpmc" },
+ { .compatible = "ti,omap2430-gpmc" },
+ { .compatible = "ti,omap3430-gpmc" }, /* omap3430 & omap3630 */
+ { .compatible = "ti,omap4430-gpmc" }, /* omap4430 & omap4460 & omap543x */
+ { .compatible = "ti,am3352-gpmc" }, /* am335x devices */
+ { .compatible = "ti,am64-gpmc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpmc_dt_ids);
+#endif
+
static struct platform_driver gpmc_driver = {
.probe = gpmc_probe,
.remove = gpmc_remove,
@@ -2654,8 +2658,7 @@ static struct platform_driver gpmc_driver = {
},
};
-static __init int gpmc_init(void)
-{
- return platform_driver_register(&gpmc_driver);
-}
-postcore_initcall(gpmc_init);
+module_platform_driver(gpmc_driver);
+
+MODULE_DESCRIPTION("Texas Instruments GPMC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
index 019a0822bde0..4316988d791a 100644
--- a/drivers/memory/renesas-rpc-if.c
+++ b/drivers/memory/renesas-rpc-if.c
@@ -259,8 +259,7 @@ int rpcif_sw_init(struct rpcif *rpc, struct device *dev)
rpc->dev = dev;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
- rpc->base = devm_ioremap_resource(&pdev->dev, res);
+ rpc->base = devm_platform_ioremap_resource_byname(pdev, "regs");
if (IS_ERR(rpc->base))
return PTR_ERR(rpc->base);
@@ -488,7 +487,7 @@ int rpcif_manual_xfer(struct rpcif *rpc)
case RPCIF_DATA_OUT:
while (pos < rpc->xferlen) {
u32 bytes_left = rpc->xferlen - pos;
- u32 nbytes, data[2];
+ u32 nbytes, data[2], *p = data;
smcr = rpc->smcr | RPCIF_SMCR_SPIE;
@@ -502,15 +501,9 @@ int rpcif_manual_xfer(struct rpcif *rpc)
rpc->xfer_size = nbytes;
memcpy(data, rpc->buffer + pos, nbytes);
- if (nbytes == 8) {
- regmap_write(rpc->regmap, RPCIF_SMWDR1,
- data[0]);
- regmap_write(rpc->regmap, RPCIF_SMWDR0,
- data[1]);
- } else {
- regmap_write(rpc->regmap, RPCIF_SMWDR0,
- data[0]);
- }
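+ /*
+ * For 8-byte transfers, write the first word to SMWDR1 and
+ * the second to SMWDR0; shorter transfers use SMWDR0 alone.
+ */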
+ if (nbytes == 8)
+ regmap_write(rpc->regmap, RPCIF_SMWDR1, *p++);
+ regmap_write(rpc->regmap, RPCIF_SMWDR0, *p);
regmap_write(rpc->regmap, RPCIF_SMCR, smcr);
ret = wait_msg_xfer_end(rpc);
@@ -552,7 +545,7 @@ int rpcif_manual_xfer(struct rpcif *rpc)
}
while (pos < rpc->xferlen) {
u32 bytes_left = rpc->xferlen - pos;
- u32 nbytes, data[2];
+ u32 nbytes, data[2], *p = data;
/* nbytes may only be 1, 2, 4, or 8 */
nbytes = bytes_left >= max ? max : (1 << ilog2(bytes_left));
@@ -569,15 +562,9 @@ int rpcif_manual_xfer(struct rpcif *rpc)
if (ret)
goto err_out;
- if (nbytes == 8) {
- regmap_read(rpc->regmap, RPCIF_SMRDR1,
- &data[0]);
- regmap_read(rpc->regmap, RPCIF_SMRDR0,
- &data[1]);
- } else {
- regmap_read(rpc->regmap, RPCIF_SMRDR0,
- &data[0]);
- }
+ if (nbytes == 8)
+ regmap_read(rpc->regmap, RPCIF_SMRDR1, p++);
+ regmap_read(rpc->regmap, RPCIF_SMRDR0, p);
memcpy(rpc->buffer + pos, data, nbytes);
pos += nbytes;
diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
index 9c8318923ed0..4733e7898ffe 100644
--- a/drivers/memory/samsung/exynos5422-dmc.c
+++ b/drivers/memory/samsung/exynos5422-dmc.c
@@ -1322,7 +1322,6 @@ static int exynos5_dmc_init_clks(struct exynos5_dmc *dmc)
*/
static int exynos5_performance_counters_init(struct exynos5_dmc *dmc)
{
- int counters_size;
int ret, i;
dmc->num_counters = devfreq_event_get_edev_count(dmc->dev,
@@ -1332,8 +1331,8 @@ static int exynos5_performance_counters_init(struct exynos5_dmc *dmc)
return dmc->num_counters;
}
- counters_size = sizeof(struct devfreq_event_dev) * dmc->num_counters;
- dmc->counter = devm_kzalloc(dmc->dev, counters_size, GFP_KERNEL);
+ dmc->counter = devm_kcalloc(dmc->dev, dmc->num_counters,
+ sizeof(*dmc->counter), GFP_KERNEL);
if (!dmc->counter)
return -ENOMEM;
diff --git a/drivers/memory/tegra/Makefile b/drivers/memory/tegra/Makefile
index c992e87782d2..0750847dac3c 100644
--- a/drivers/memory/tegra/Makefile
+++ b/drivers/memory/tegra/Makefile
@@ -9,6 +9,7 @@ tegra-mc-$(CONFIG_ARCH_TEGRA_132_SOC) += tegra124.o
tegra-mc-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210.o
tegra-mc-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186.o
tegra-mc-$(CONFIG_ARCH_TEGRA_194_SOC) += tegra186.o tegra194.o
+tegra-mc-$(CONFIG_ARCH_TEGRA_234_SOC) += tegra186.o tegra234.o
obj-$(CONFIG_TEGRA_MC) += tegra-mc.o
@@ -19,5 +20,6 @@ obj-$(CONFIG_TEGRA210_EMC_TABLE) += tegra210-emc-table.o
obj-$(CONFIG_TEGRA210_EMC) += tegra210-emc.o
obj-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186-emc.o
obj-$(CONFIG_ARCH_TEGRA_194_SOC) += tegra186-emc.o
+obj-$(CONFIG_ARCH_TEGRA_234_SOC) += tegra186-emc.o
tegra210-emc-y := tegra210-emc-core.o tegra210-emc-cc-r21021.o
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index 44b4a4080920..2f7a58a9df1a 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -45,6 +45,9 @@ static const struct of_device_id tegra_mc_of_match[] = {
#ifdef CONFIG_ARCH_TEGRA_194_SOC
{ .compatible = "nvidia,tegra194-mc", .data = &tegra194_mc_soc },
#endif
+#ifdef CONFIG_ARCH_TEGRA_234_SOC
+ { .compatible = "nvidia,tegra234-mc", .data = &tegra234_mc_soc },
+#endif
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tegra_mc_of_match);
@@ -505,14 +508,54 @@ int tegra30_mc_probe(struct tegra_mc *mc)
return 0;
}
-static irqreturn_t tegra30_mc_handle_irq(int irq, void *data)
+const struct tegra_mc_ops tegra30_mc_ops = {
+ .probe = tegra30_mc_probe,
+ .handle_irq = tegra30_mc_handle_irq,
+};
+#endif
+
+static int mc_global_intstatus_to_channel(const struct tegra_mc *mc, u32 status,
+ unsigned int *mc_channel)
+{
+ if ((status & mc->soc->ch_intmask) == 0)
+ return -EINVAL;
+
+ *mc_channel = __ffs((status & mc->soc->ch_intmask) >>
+ mc->soc->global_intstatus_channel_shift);
+
+ return 0;
+}
+
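+/*
+ * Example: with ch_intmask 0x00000f00 and a channel shift of 8 (the
+ * Tegra194 values below), global status 0x00000400 yields channel
+ * __ffs(0x400 >> 8) = 2, and channel 2 maps back to BIT(2) << 8.
+ */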
+static u32 mc_channel_to_global_intstatus(const struct tegra_mc *mc,
+ unsigned int channel)
+{
+ return BIT(channel) << mc->soc->global_intstatus_channel_shift;
+}
+
+irqreturn_t tegra30_mc_handle_irq(int irq, void *data)
{
struct tegra_mc *mc = data;
+ unsigned int bit, channel;
unsigned long status;
- unsigned int bit;
- /* mask all interrupts to avoid flooding */
- status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask;
+ if (mc->soc->num_channels) {
+ u32 global_status;
+ int err;
+
+ global_status = mc_ch_readl(mc, MC_BROADCAST_CHANNEL, MC_GLOBAL_INTSTATUS);
+ err = mc_global_intstatus_to_channel(mc, global_status, &channel);
+ if (err < 0) {
+ dev_err_ratelimited(mc->dev, "unknown interrupt channel 0x%08x\n",
+ global_status);
+ return IRQ_NONE;
+ }
+
+ /* mask all interrupts to avoid flooding */
+ status = mc_ch_readl(mc, channel, MC_INTSTATUS) & mc->soc->intmask;
+ } else {
+ status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask;
+ }
+
if (!status)
return IRQ_NONE;
@@ -520,18 +563,70 @@ static irqreturn_t tegra30_mc_handle_irq(int irq, void *data)
const char *error = tegra_mc_status_names[bit] ?: "unknown";
const char *client = "unknown", *desc;
const char *direction, *secure;
+ u32 status_reg, addr_reg;
+ u32 intmask = BIT(bit);
phys_addr_t addr = 0;
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ u32 addr_hi_reg = 0;
+#endif
unsigned int i;
char perm[7];
u8 id, type;
u32 value;
- value = mc_readl(mc, MC_ERR_STATUS);
+ switch (intmask) {
+ case MC_INT_DECERR_VPR:
+ status_reg = MC_ERR_VPR_STATUS;
+ addr_reg = MC_ERR_VPR_ADR;
+ break;
+
+ case MC_INT_SECERR_SEC:
+ status_reg = MC_ERR_SEC_STATUS;
+ addr_reg = MC_ERR_SEC_ADR;
+ break;
+
+ case MC_INT_DECERR_MTS:
+ status_reg = MC_ERR_MTS_STATUS;
+ addr_reg = MC_ERR_MTS_ADR;
+ break;
+
+ case MC_INT_DECERR_GENERALIZED_CARVEOUT:
+ status_reg = MC_ERR_GENERALIZED_CARVEOUT_STATUS;
+ addr_reg = MC_ERR_GENERALIZED_CARVEOUT_ADR;
+ break;
+
+ case MC_INT_DECERR_ROUTE_SANITY:
+ status_reg = MC_ERR_ROUTE_SANITY_STATUS;
+ addr_reg = MC_ERR_ROUTE_SANITY_ADR;
+ break;
+
+ default:
+ status_reg = MC_ERR_STATUS;
+ addr_reg = MC_ERR_ADR;
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if (mc->soc->has_addr_hi_reg)
+ addr_hi_reg = MC_ERR_ADR_HI;
+#endif
+ break;
+ }
+
+ if (mc->soc->num_channels)
+ value = mc_ch_readl(mc, channel, status_reg);
+ else
+ value = mc_readl(mc, status_reg);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
if (mc->soc->num_address_bits > 32) {
- addr = ((value >> MC_ERR_STATUS_ADR_HI_SHIFT) &
- MC_ERR_STATUS_ADR_HI_MASK);
+ if (addr_hi_reg) {
+ if (mc->soc->num_channels)
+ addr = mc_ch_readl(mc, channel, addr_hi_reg);
+ else
+ addr = mc_readl(mc, addr_hi_reg);
+ } else {
+ addr = ((value >> MC_ERR_STATUS_ADR_HI_SHIFT) &
+ MC_ERR_STATUS_ADR_HI_MASK);
+ }
addr <<= 32;
}
#endif
@@ -588,7 +683,10 @@ static irqreturn_t tegra30_mc_handle_irq(int irq, void *data)
break;
}
- value = mc_readl(mc, MC_ERR_ADR);
+ if (mc->soc->num_channels)
+ value = mc_ch_readl(mc, channel, addr_reg);
+ else
+ value = mc_readl(mc, addr_reg);
addr |= value;
dev_err_ratelimited(mc->dev, "%s: %s%s @%pa: %s (%s%s)\n",
@@ -597,17 +695,18 @@ static irqreturn_t tegra30_mc_handle_irq(int irq, void *data)
}
/* clear interrupts */
- mc_writel(mc, status, MC_INTSTATUS);
+ if (mc->soc->num_channels) {
+ mc_ch_writel(mc, channel, status, MC_INTSTATUS);
+ mc_ch_writel(mc, MC_BROADCAST_CHANNEL,
+ mc_channel_to_global_intstatus(mc, channel),
+ MC_GLOBAL_INTSTATUS);
+ } else {
+ mc_writel(mc, status, MC_INTSTATUS);
+ }
return IRQ_HANDLED;
}
-const struct tegra_mc_ops tegra30_mc_ops = {
- .probe = tegra30_mc_probe,
- .handle_irq = tegra30_mc_handle_irq,
-};
-#endif
-
const char *const tegra_mc_status_names[32] = {
[ 1] = "External interrupt",
[ 6] = "EMEM address decode error",
@@ -619,6 +718,8 @@ const char *const tegra_mc_status_names[32] = {
[12] = "VPR violation",
[13] = "Secure carveout violation",
[16] = "MTS carveout violation",
+ [17] = "Generalized carveout violation",
+ [20] = "Route Sanity error",
};
const char *const tegra_mc_error_names[8] = {
@@ -716,7 +817,6 @@ del_provider:
static int tegra_mc_probe(struct platform_device *pdev)
{
- struct resource *res;
struct tegra_mc *mc;
u64 mask;
int err;
@@ -741,8 +841,7 @@ static int tegra_mc_probe(struct platform_device *pdev)
/* length of MC tick in nanoseconds */
mc->tick = 30;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mc->regs = devm_ioremap_resource(&pdev->dev, res);
+ mc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mc->regs))
return PTR_ERR(mc->regs);
@@ -761,7 +860,11 @@ static int tegra_mc_probe(struct platform_device *pdev)
WARN(!mc->soc->client_id_mask, "missing client ID mask for this SoC\n");
- mc_writel(mc, mc->soc->intmask, MC_INTMASK);
+ if (mc->soc->num_channels)
+ mc_ch_writel(mc, MC_BROADCAST_CHANNEL, mc->soc->intmask,
+ MC_INTMASK);
+ else
+ mc_writel(mc, mc->soc->intmask, MC_INTMASK);
err = devm_request_irq(&pdev->dev, mc->irq, mc->soc->ops->handle_irq, 0,
dev_name(&pdev->dev), mc);
diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h
index 1e492989c363..bc01586b6560 100644
--- a/drivers/memory/tegra/mc.h
+++ b/drivers/memory/tegra/mc.h
@@ -43,7 +43,21 @@
#define MC_EMEM_ARB_OVERRIDE 0xe8
#define MC_TIMING_CONTROL_DBG 0xf8
#define MC_TIMING_CONTROL 0xfc
-
+#define MC_ERR_VPR_STATUS 0x654
+#define MC_ERR_VPR_ADR 0x658
+#define MC_ERR_SEC_STATUS 0x67c
+#define MC_ERR_SEC_ADR 0x680
+#define MC_ERR_MTS_STATUS 0x9b0
+#define MC_ERR_MTS_ADR 0x9b4
+#define MC_ERR_ROUTE_SANITY_STATUS 0x9c0
+#define MC_ERR_ROUTE_SANITY_ADR 0x9c4
+#define MC_ERR_GENERALIZED_CARVEOUT_STATUS 0xc00
+#define MC_ERR_GENERALIZED_CARVEOUT_ADR 0xc04
+#define MC_GLOBAL_INTSTATUS 0xf24
+#define MC_ERR_ADR_HI 0x11fc
+
+#define MC_INT_DECERR_ROUTE_SANITY BIT(20)
+#define MC_INT_DECERR_GENERALIZED_CARVEOUT BIT(17)
#define MC_INT_DECERR_MTS BIT(16)
#define MC_INT_SECERR_SEC BIT(13)
#define MC_INT_DECERR_VPR BIT(12)
@@ -78,6 +92,8 @@
#define MC_TIMING_UPDATE BIT(0)
+#define MC_BROADCAST_CHANNEL ~0
+
static inline u32 tegra_mc_scale_percents(u64 val, unsigned int percents)
{
val = val * percents;
@@ -92,6 +108,30 @@ icc_provider_to_tegra_mc(struct icc_provider *provider)
return container_of(provider, struct tegra_mc, provider);
}
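+
+/*
+ * Per-channel accessors: MC_BROADCAST_CHANNEL targets the broadcast
+ * aperture, any other value one of the per-channel apertures; both
+ * quietly no-op (reads return 0) on SoCs without channel registers.
+ */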
+static inline u32 mc_ch_readl(const struct tegra_mc *mc, int ch,
+ unsigned long offset)
+{
+ if (!mc->bcast_ch_regs)
+ return 0;
+
+ if (ch == MC_BROADCAST_CHANNEL)
+ return readl_relaxed(mc->bcast_ch_regs + offset);
+
+ return readl_relaxed(mc->ch_regs[ch] + offset);
+}
+
+static inline void mc_ch_writel(const struct tegra_mc *mc, int ch,
+ u32 value, unsigned long offset)
+{
+ if (!mc->bcast_ch_regs)
+ return;
+
+ if (ch == MC_BROADCAST_CHANNEL)
+ writel_relaxed(value, mc->bcast_ch_regs + offset);
+ else
+ writel_relaxed(value, mc->ch_regs[ch] + offset);
+}
+
static inline u32 mc_readl(const struct tegra_mc *mc, unsigned long offset)
{
return readl_relaxed(mc->regs + offset);
@@ -137,6 +177,10 @@ extern const struct tegra_mc_soc tegra186_mc_soc;
extern const struct tegra_mc_soc tegra194_mc_soc;
#endif
+#ifdef CONFIG_ARCH_TEGRA_234_SOC
+extern const struct tegra_mc_soc tegra234_mc_soc;
+#endif
+
#if defined(CONFIG_ARCH_TEGRA_3x_SOC) || \
defined(CONFIG_ARCH_TEGRA_114_SOC) || \
defined(CONFIG_ARCH_TEGRA_124_SOC) || \
@@ -147,10 +191,12 @@ extern const struct tegra_mc_ops tegra30_mc_ops;
#endif
#if defined(CONFIG_ARCH_TEGRA_186_SOC) || \
- defined(CONFIG_ARCH_TEGRA_194_SOC)
+ defined(CONFIG_ARCH_TEGRA_194_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_234_SOC)
extern const struct tegra_mc_ops tegra186_mc_ops;
#endif
+irqreturn_t tegra30_mc_handle_irq(int irq, void *data);
extern const char * const tegra_mc_status_names[32];
extern const char * const tegra_mc_error_names[8];
diff --git a/drivers/memory/tegra/tegra186-emc.c b/drivers/memory/tegra/tegra186-emc.c
index 746c4ef2c0af..54b47ca33483 100644
--- a/drivers/memory/tegra/tegra186-emc.c
+++ b/drivers/memory/tegra/tegra186-emc.c
@@ -273,6 +273,9 @@ static const struct of_device_id tegra186_emc_of_match[] = {
#if defined(CONFIG_ARCH_TEGRA_194_SOC)
{ .compatible = "nvidia,tegra194-emc" },
#endif
+#if defined(CONFIG_ARCH_TEGRA_234_SOC)
+ { .compatible = "nvidia,tegra234-emc" },
+#endif
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tegra186_emc_of_match);
diff --git a/drivers/memory/tegra/tegra186.c b/drivers/memory/tegra/tegra186.c
index 3d153881abc1..62477e592bf5 100644
--- a/drivers/memory/tegra/tegra186.c
+++ b/drivers/memory/tegra/tegra186.c
@@ -16,6 +16,8 @@
#include <dt-bindings/memory/tegra186-mc.h>
#endif
+#include "mc.h"
+
#define MC_SID_STREAMID_OVERRIDE_MASK GENMASK(7, 0)
#define MC_SID_STREAMID_SECURITY_WRITE_ACCESS_DISABLED BIT(16)
#define MC_SID_STREAMID_SECURITY_OVERRIDE BIT(8)
@@ -48,8 +50,37 @@ static void tegra186_mc_program_sid(struct tegra_mc *mc)
static int tegra186_mc_probe(struct tegra_mc *mc)
{
+ struct platform_device *pdev = to_platform_device(mc->dev);
+ unsigned int i;
+ char name[8];
int err;
+ mc->bcast_ch_regs = devm_platform_ioremap_resource_byname(pdev, "broadcast");
+ if (IS_ERR(mc->bcast_ch_regs)) {
+ if (PTR_ERR(mc->bcast_ch_regs) == -EINVAL) {
+ dev_warn(&pdev->dev,
+ "Broadcast channel is missing, please update your device-tree\n");
+ mc->bcast_ch_regs = NULL;
+ goto populate;
+ }
+
+ return PTR_ERR(mc->bcast_ch_regs);
+ }
+
+ mc->ch_regs = devm_kcalloc(mc->dev, mc->soc->num_channels, sizeof(*mc->ch_regs),
+ GFP_KERNEL);
+ if (!mc->ch_regs)
+ return -ENOMEM;
+
+ for (i = 0; i < mc->soc->num_channels; i++) {
+ snprintf(name, sizeof(name), "ch%u", i);
+
+ mc->ch_regs[i] = devm_platform_ioremap_resource_byname(pdev, name);
+ if (IS_ERR(mc->ch_regs[i]))
+ return PTR_ERR(mc->ch_regs[i]);
+ }
+
+populate:
err = of_platform_populate(mc->dev->of_node, NULL, NULL, mc->dev);
if (err < 0)
return err;
@@ -144,6 +175,7 @@ const struct tegra_mc_ops tegra186_mc_ops = {
.remove = tegra186_mc_remove,
.resume = tegra186_mc_resume,
.probe_device = tegra186_mc_probe_device,
+ .handle_irq = tegra30_mc_handle_irq,
};
#if defined(CONFIG_ARCH_TEGRA_186_SOC)
@@ -875,6 +907,13 @@ const struct tegra_mc_soc tegra186_mc_soc = {
.num_clients = ARRAY_SIZE(tegra186_mc_clients),
.clients = tegra186_mc_clients,
.num_address_bits = 40,
+ .num_channels = 4,
+ .client_id_mask = 0xff,
+ .intmask = MC_INT_DECERR_GENERALIZED_CARVEOUT | MC_INT_DECERR_MTS |
+ MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
+ MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
.ops = &tegra186_mc_ops,
+ .ch_intmask = 0x0000000f,
+ .global_intstatus_channel_shift = 0,
};
#endif
diff --git a/drivers/memory/tegra/tegra194.c b/drivers/memory/tegra/tegra194.c
index cab998b8bd5c..b2416ee3ac26 100644
--- a/drivers/memory/tegra/tegra194.c
+++ b/drivers/memory/tegra/tegra194.c
@@ -1347,5 +1347,14 @@ const struct tegra_mc_soc tegra194_mc_soc = {
.num_clients = ARRAY_SIZE(tegra194_mc_clients),
.clients = tegra194_mc_clients,
.num_address_bits = 40,
+ .num_channels = 16,
+ .client_id_mask = 0xff,
+ .intmask = MC_INT_DECERR_ROUTE_SANITY |
+ MC_INT_DECERR_GENERALIZED_CARVEOUT | MC_INT_DECERR_MTS |
+ MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
+ MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
+ .has_addr_hi_reg = true,
.ops = &tegra186_mc_ops,
+ .ch_intmask = 0x00000f00,
+ .global_intstatus_channel_shift = 8,
};
diff --git a/drivers/memory/tegra/tegra234.c b/drivers/memory/tegra/tegra234.c
new file mode 100644
index 000000000000..e23ebd421f17
--- /dev/null
+++ b/drivers/memory/tegra/tegra234.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021-2022, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <soc/tegra/mc.h>
+
+#include <dt-bindings/memory/tegra234-mc.h>
+
+#include "mc.h"
+
+static const struct tegra_mc_client tegra234_mc_clients[] = {
+ {
+ .id = TEGRA234_MEMORY_CLIENT_SDMMCRAB,
+ .name = "sdmmcrab",
+ .sid = TEGRA234_SID_SDMMC4,
+ .regs = {
+ .sid = {
+ .override = 0x318,
+ .security = 0x31c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_SDMMCWAB,
+ .name = "sdmmcwab",
+ .sid = TEGRA234_SID_SDMMC4,
+ .regs = {
+ .sid = {
+ .override = 0x338,
+ .security = 0x33c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_BPMPR,
+ .name = "bpmpr",
+ .sid = TEGRA234_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x498,
+ .security = 0x49c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_BPMPW,
+ .name = "bpmpw",
+ .sid = TEGRA234_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x4a0,
+ .security = 0x4a4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_BPMPDMAR,
+ .name = "bpmpdmar",
+ .sid = TEGRA234_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x4a8,
+ .security = 0x4ac,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_BPMPDMAW,
+ .name = "bpmpdmaw",
+ .sid = TEGRA234_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x4b0,
+ .security = 0x4b4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_APEDMAR,
+ .name = "apedmar",
+ .sid = TEGRA234_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x4f8,
+ .security = 0x4fc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_APEDMAW,
+ .name = "apedmaw",
+ .sid = TEGRA234_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x500,
+ .security = 0x504,
+ },
+ },
+ },
+};
+
+const struct tegra_mc_soc tegra234_mc_soc = {
+ .num_clients = ARRAY_SIZE(tegra234_mc_clients),
+ .clients = tegra234_mc_clients,
+ .num_address_bits = 40,
+ .num_channels = 16,
+ .client_id_mask = 0x1ff,
+ .intmask = MC_INT_DECERR_ROUTE_SANITY |
+ MC_INT_DECERR_GENERALIZED_CARVEOUT | MC_INT_DECERR_MTS |
+ MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
+ MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
+ .has_addr_hi_reg = true,
+ .ops = &tegra186_mc_ops,
+ .ch_intmask = 0x0000ff00,
+ .global_intstatus_channel_shift = 8,
+};
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
index 51d20c2ccb75..f81e7df8798a 100644
--- a/drivers/memory/ti-aemif.c
+++ b/drivers/memory/ti-aemif.c
@@ -328,7 +328,6 @@ static int aemif_probe(struct platform_device *pdev)
{
int i;
int ret = -ENODEV;
- struct resource *res;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct device_node *child_np;
@@ -362,8 +361,7 @@ static int aemif_probe(struct platform_device *pdev)
else if (pdata)
aemif->cs_offset = pdata->cs_offset;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- aemif->base = devm_ioremap_resource(dev, res);
+ aemif->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(aemif->base)) {
ret = PTR_ERR(aemif->base);
goto error;
diff --git a/drivers/memory/ti-emif-pm.c b/drivers/memory/ti-emif-pm.c
index 179fec2da56d..31d6266f008c 100644
--- a/drivers/memory/ti-emif-pm.c
+++ b/drivers/memory/ti-emif-pm.c
@@ -290,9 +290,9 @@ static int ti_emif_probe(struct platform_device *pdev)
emif_data->pm_data.ti_emif_sram_config = (unsigned long)match->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- emif_data->pm_data.ti_emif_base_addr_virt = devm_ioremap_resource(dev,
- res);
+ emif_data->pm_data.ti_emif_base_addr_virt = devm_platform_get_and_ioremap_resource(pdev,
+ 0,
+ &res);
if (IS_ERR(emif_data->pm_data.ti_emif_base_addr_virt)) {
ret = PTR_ERR(emif_data->pm_data.ti_emif_base_addr_virt);
return ret;
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index d6d056963c06..877d2ec4ea9f 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -91,3 +91,16 @@ config NVME_TCP
from https://github.com/linux-nvme/nvme-cli.
If unsure, say N.
+
+config NVME_APPLE
+ tristate "Apple ANS2 NVM Express host driver"
+ depends on OF && BLOCK
+ depends on APPLE_RTKIT && APPLE_SART
+ depends on ARCH_APPLE || COMPILE_TEST
+ select NVME_CORE
+ help
+ This provides support for the NVMe controller embedded in Apple SoCs
+ such as the M1.
+
+ To compile this driver as a module, choose M here: the
+ module will be called nvme-apple.
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index 476c5c988496..a36ae1612059 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o
obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
obj-$(CONFIG_NVME_FC) += nvme-fc.o
obj-$(CONFIG_NVME_TCP) += nvme-tcp.o
+obj-$(CONFIG_NVME_APPLE) += nvme-apple.o
nvme-core-y := core.o ioctl.o constants.o
nvme-core-$(CONFIG_TRACING) += trace.o
@@ -25,3 +26,5 @@ nvme-rdma-y += rdma.o
nvme-fc-y += fc.o
nvme-tcp-y += tcp.o
+
+nvme-apple-y += apple.o
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
new file mode 100644
index 000000000000..d702d7d60235
--- /dev/null
+++ b/drivers/nvme/host/apple.c
@@ -0,0 +1,1593 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Apple ANS NVM Express device driver
+ * Copyright The Asahi Linux Contributors
+ *
+ * Based on the pci.c NVM Express device driver
+ * Copyright (c) 2011-2014, Intel Corporation.
+ * and on the rdma.c NVMe over Fabrics RDMA host code.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ */
+
+#include <linux/async.h>
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/once.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/soc/apple/rtkit.h>
+#include <linux/soc/apple/sart.h>
+#include <linux/reset.h>
+#include <linux/time64.h>
+
+#include "nvme.h"
+
+#define APPLE_ANS_BOOT_TIMEOUT USEC_PER_SEC
+#define APPLE_ANS_MAX_QUEUE_DEPTH 64
+
+#define APPLE_ANS_COPROC_CPU_CONTROL 0x44
+#define APPLE_ANS_COPROC_CPU_CONTROL_RUN BIT(4)
+
+#define APPLE_ANS_ACQ_DB 0x1004
+#define APPLE_ANS_IOCQ_DB 0x100c
+
+#define APPLE_ANS_MAX_PEND_CMDS_CTRL 0x1210
+
+#define APPLE_ANS_BOOT_STATUS 0x1300
+#define APPLE_ANS_BOOT_STATUS_OK 0xde71ce55
+
+#define APPLE_ANS_UNKNOWN_CTRL 0x24008
+#define APPLE_ANS_PRP_NULL_CHECK BIT(11)
+
+#define APPLE_ANS_LINEAR_SQ_CTRL 0x24908
+#define APPLE_ANS_LINEAR_SQ_EN BIT(0)
+
+#define APPLE_ANS_LINEAR_ASQ_DB 0x2490c
+#define APPLE_ANS_LINEAR_IOSQ_DB 0x24910
+
+#define APPLE_NVMMU_NUM_TCBS 0x28100
+#define APPLE_NVMMU_ASQ_TCB_BASE 0x28108
+#define APPLE_NVMMU_IOSQ_TCB_BASE 0x28110
+#define APPLE_NVMMU_TCB_INVAL 0x28118
+#define APPLE_NVMMU_TCB_STAT 0x28120
+
+/*
+ * This controller is a bit weird in the way command tags work: both the
+ * admin and the IO queue share the same tag space. Additionally, tags
+ * cannot be higher than 0x40, which effectively limits the combined
+ * queue depth to 0x40. Instead of wasting half of that on the admin
+ * queue, which gets much less traffic, we reduce its size here.
+ * The controller also doesn't support async events, so no space needs
+ * to be reserved for NVME_NR_AEN_COMMANDS.
+ */
+#define APPLE_NVME_AQ_DEPTH 2
+#define APPLE_NVME_AQ_MQ_TAG_DEPTH (APPLE_NVME_AQ_DEPTH - 1)
+
+/*
+ * These can be higher, but we need to ensure that no command requires
+ * an sg allocation that needs more than a page of data.
+ */
+#define NVME_MAX_KB_SZ 4096
+#define NVME_MAX_SEGS 127
+
+/*
+ * This controller comes with an embedded IOMMU known as NVMMU.
+ * The NVMMU points to an array of TCBs indexed by the command tag.
+ * Each command must be configured inside this structure before it's allowed
+ * to execute, including commands that don't require DMA transfers.
+ *
+ * Exceptions to this are Apple's vendor-specific commands (opcode 0xD8 on the
+ * admin queue): those commands must still be added to the NVMMU, but their DMA
+ * buffers cannot be represented as PRPs and must instead be allowed via SART.
+ *
+ * Programming the PRPs to the same values as those in the submission queue
+ * looks rather silly at first. This hardware is however designed for a kernel
+ * that runs the NVMMU code in a higher exception level than the NVMe driver.
+ * In that setting the NVMe driver first programs the submission queue entry
+ * and then executes a hypercall to the code that is allowed to program the
+ * NVMMU. The NVMMU driver then creates a shadow copy of the PRPs while
+ * verifying that they don't point to kernel text, data, pagetables, or similar
+ * protected areas before programming the TCB to point to this shadow copy.
+ * Since Linux doesn't do any of that, we may as well just point both the queue
+ * and the TCB PRP pointer to the same memory.
+ */
+struct apple_nvmmu_tcb {
+ u8 opcode;
+
+#define APPLE_ANS_TCB_DMA_FROM_DEVICE BIT(0)
+#define APPLE_ANS_TCB_DMA_TO_DEVICE BIT(1)
+ u8 dma_flags;
+
+ u8 command_id;
+ u8 _unk0;
+ __le16 length;
+ u8 _unk1[18];
+ __le64 prp1;
+ __le64 prp2;
+ u8 _unk2[16];
+ u8 aes_iv[8];
+ u8 _aes_unk[64];
+};
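+
+/*
+ * Note: the field sizes sum to the 128 bytes checked by the
+ * static_assert further down: 1+1+1+1 + 2 + 18 + 8+8 + 16 + 8 + 64 = 128.
+ */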
+
+/*
+ * The Apple NVMe controller only supports a single admin and a single IO queue
+ * which are both limited to 64 entries and share a single interrupt.
+ *
+ * The completion queue works as usual. The submission "queue" instead is
+ * an array indexed by the command tag on this hardware. Commands must also be
+ * present in the NVMMU's tcb array. They are triggered by writing their tag to
+ * an MMIO register.
+ */
+struct apple_nvme_queue {
+ struct nvme_command *sqes;
+ struct nvme_completion *cqes;
+ struct apple_nvmmu_tcb *tcbs;
+
+ dma_addr_t sq_dma_addr;
+ dma_addr_t cq_dma_addr;
+ dma_addr_t tcb_dma_addr;
+
+ u32 __iomem *sq_db;
+ u32 __iomem *cq_db;
+
+ u16 cq_head;
+ u8 cq_phase;
+
+ bool is_adminq;
+ bool enabled;
+};
+
+/*
+ * The apple_nvme_iod describes the data in an I/O.
+ *
+ * The sg pointer contains the list of PRP chunk allocations in addition
+ * to the actual struct scatterlist.
+ */
+struct apple_nvme_iod {
+ struct nvme_request req;
+ struct nvme_command cmd;
+ struct apple_nvme_queue *q;
+ int npages; /* In the PRP list. 0 means small pool in use */
+ int nents; /* Used in scatterlist */
+ dma_addr_t first_dma;
+ unsigned int dma_len; /* length of single DMA segment mapping */
+ struct scatterlist *sg;
+};
+
+struct apple_nvme {
+ struct device *dev;
+
+ void __iomem *mmio_coproc;
+ void __iomem *mmio_nvme;
+
+ struct device **pd_dev;
+ struct device_link **pd_link;
+ int pd_count;
+
+ struct apple_sart *sart;
+ struct apple_rtkit *rtk;
+ struct reset_control *reset;
+
+ struct dma_pool *prp_page_pool;
+ struct dma_pool *prp_small_pool;
+ mempool_t *iod_mempool;
+
+ struct nvme_ctrl ctrl;
+ struct work_struct remove_work;
+
+ struct apple_nvme_queue adminq;
+ struct apple_nvme_queue ioq;
+
+ struct blk_mq_tag_set admin_tagset;
+ struct blk_mq_tag_set tagset;
+
+ int irq;
+ spinlock_t lock;
+};
+
+static_assert(sizeof(struct nvme_command) == 64);
+static_assert(sizeof(struct apple_nvmmu_tcb) == 128);
+
+static inline struct apple_nvme *ctrl_to_apple_nvme(struct nvme_ctrl *ctrl)
+{
+ return container_of(ctrl, struct apple_nvme, ctrl);
+}
+
+static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q)
+{
+ if (q->is_adminq)
+ return container_of(q, struct apple_nvme, adminq);
+ else
+ return container_of(q, struct apple_nvme, ioq);
+}
+
+static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
+{
+ if (q->is_adminq)
+ return APPLE_NVME_AQ_DEPTH;
+ else
+ return APPLE_ANS_MAX_QUEUE_DEPTH;
+}
+
+static void apple_nvme_rtkit_crashed(void *cookie)
+{
+ struct apple_nvme *anv = cookie;
+
+ dev_warn(anv->dev, "RTKit crashed; unable to recover without a reboot");
+ nvme_reset_ctrl(&anv->ctrl);
+}
+
+static int apple_nvme_sart_dma_setup(void *cookie,
+ struct apple_rtkit_shmem *bfr)
+{
+ struct apple_nvme *anv = cookie;
+ int ret;
+
+ if (bfr->iova)
+ return -EINVAL;
+ if (!bfr->size)
+ return -EINVAL;
+
+ bfr->buffer =
+ dma_alloc_coherent(anv->dev, bfr->size, &bfr->iova, GFP_KERNEL);
+ if (!bfr->buffer)
+ return -ENOMEM;
+
+ ret = apple_sart_add_allowed_region(anv->sart, bfr->iova, bfr->size);
+ if (ret) {
+ dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova);
+ bfr->buffer = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void apple_nvme_sart_dma_destroy(void *cookie,
+ struct apple_rtkit_shmem *bfr)
+{
+ struct apple_nvme *anv = cookie;
+
+ apple_sart_remove_allowed_region(anv->sart, bfr->iova, bfr->size);
+ dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova);
+}
+
+static const struct apple_rtkit_ops apple_nvme_rtkit_ops = {
+ .crashed = apple_nvme_rtkit_crashed,
+ .shmem_setup = apple_nvme_sart_dma_setup,
+ .shmem_destroy = apple_nvme_sart_dma_destroy,
+};
+
+static void apple_nvmmu_inval(struct apple_nvme_queue *q, unsigned int tag)
+{
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+
+ writel(tag, anv->mmio_nvme + APPLE_NVMMU_TCB_INVAL);
+ if (readl(anv->mmio_nvme + APPLE_NVMMU_TCB_STAT))
+ dev_warn_ratelimited(anv->dev,
+ "NVMMU TCB invalidation failed\n");
+}
+
+static void apple_nvme_submit_cmd(struct apple_nvme_queue *q,
+ struct nvme_command *cmd)
+{
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+ u32 tag = nvme_tag_from_cid(cmd->common.command_id);
+ struct apple_nvmmu_tcb *tcb = &q->tcbs[tag];
+
+ tcb->opcode = cmd->common.opcode;
+ tcb->prp1 = cmd->common.dptr.prp1;
+ tcb->prp2 = cmd->common.dptr.prp2;
+ tcb->length = cmd->rw.length;
+ tcb->command_id = tag;
+
+ if (nvme_is_write(cmd))
+ tcb->dma_flags = APPLE_ANS_TCB_DMA_TO_DEVICE;
+ else
+ tcb->dma_flags = APPLE_ANS_TCB_DMA_FROM_DEVICE;
+
+ memcpy(&q->sqes[tag], cmd, sizeof(*cmd));
+
+ /*
+ * This lock here doesn't make much sense at first glance, but
+ * removing it will result in occasional missed completion
+ * interrupts even though the commands still appear on the CQ.
+ * It's unclear why this happens but our best guess is that
+ * there is a bug in the firmware triggered when a new command
+ * is issued while we're inside the irq handler between the
+ * NVMMU invalidation (and making the tag available again)
+ * and the final CQ update.
+ */
+ spin_lock_irq(&anv->lock);
+ writel(tag, q->sq_db);
+ spin_unlock_irq(&anv->lock);
+}
+
+/*
+ * From pci.c:
+ * Will slightly overestimate the number of pages needed. This is OK
+ * as it only leads to a small amount of wasted memory for the lifetime of
+ * the I/O.
+ */
+static inline size_t apple_nvme_iod_alloc_size(void)
+{
+ const unsigned int nprps = DIV_ROUND_UP(
+ NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE);
+ const int npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
+ const size_t alloc_size = sizeof(__le64 *) * npages +
+ sizeof(struct scatterlist) * NVME_MAX_SEGS;
+
+ return alloc_size;
+}
+
+static void **apple_nvme_iod_list(struct request *req)
+{
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
+}
+
+static void apple_nvme_free_prps(struct apple_nvme *anv, struct request *req)
+{
+ const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ dma_addr_t dma_addr = iod->first_dma;
+ int i;
+
+ for (i = 0; i < iod->npages; i++) {
+ __le64 *prp_list = apple_nvme_iod_list(req)[i];
+ dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+
+ dma_pool_free(anv->prp_page_pool, prp_list, dma_addr);
+ dma_addr = next_dma_addr;
+ }
+}
+
+static void apple_nvme_unmap_data(struct apple_nvme *anv, struct request *req)
+{
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ if (iod->dma_len) {
+ dma_unmap_page(anv->dev, iod->first_dma, iod->dma_len,
+ rq_dma_dir(req));
+ return;
+ }
+
+ WARN_ON_ONCE(!iod->nents);
+
+ dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
+ if (iod->npages == 0)
+ dma_pool_free(anv->prp_small_pool, apple_nvme_iod_list(req)[0],
+ iod->first_dma);
+ else
+ apple_nvme_free_prps(anv, req);
+ mempool_free(iod->sg, anv->iod_mempool);
+}
+
+static void apple_nvme_print_sgl(struct scatterlist *sgl, int nents)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sgl, sg, nents, i) {
+ dma_addr_t phys = sg_phys(sg);
+
+ pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d dma_address:%pad dma_length:%d\n",
+ i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
+ sg_dma_len(sg));
+ }
+}
+
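+/*
+ * Standard NVMe PRP construction, mirroring pci.c: PRP1 covers the
+ * first (possibly unaligned) controller page, PRP2 either holds the
+ * second page or points to a PRP list; when a list page fills up,
+ * its last slot is repointed at the next list page to chain them.
+ */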
+static blk_status_t apple_nvme_setup_prps(struct apple_nvme *anv,
+ struct request *req,
+ struct nvme_rw_command *cmnd)
+{
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct dma_pool *pool;
+ int length = blk_rq_payload_bytes(req);
+ struct scatterlist *sg = iod->sg;
+ int dma_len = sg_dma_len(sg);
+ u64 dma_addr = sg_dma_address(sg);
+ int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
+ __le64 *prp_list;
+ void **list = apple_nvme_iod_list(req);
+ dma_addr_t prp_dma;
+ int nprps, i;
+
+ length -= (NVME_CTRL_PAGE_SIZE - offset);
+ if (length <= 0) {
+ iod->first_dma = 0;
+ goto done;
+ }
+
+ dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
+ if (dma_len) {
+ dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
+ } else {
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ if (length <= NVME_CTRL_PAGE_SIZE) {
+ iod->first_dma = dma_addr;
+ goto done;
+ }
+
+ nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
+ if (nprps <= (256 / 8)) {
+ pool = anv->prp_small_pool;
+ iod->npages = 0;
+ } else {
+ pool = anv->prp_page_pool;
+ iod->npages = 1;
+ }
+
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+ if (!prp_list) {
+ iod->first_dma = dma_addr;
+ iod->npages = -1;
+ return BLK_STS_RESOURCE;
+ }
+ list[0] = prp_list;
+ iod->first_dma = prp_dma;
+ i = 0;
+ for (;;) {
+ if (i == NVME_CTRL_PAGE_SIZE >> 3) {
+ __le64 *old_prp_list = prp_list;
+
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+ if (!prp_list)
+ goto free_prps;
+ list[iod->npages++] = prp_list;
+ prp_list[0] = old_prp_list[i - 1];
+ old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+ i = 1;
+ }
+ prp_list[i++] = cpu_to_le64(dma_addr);
+ dma_len -= NVME_CTRL_PAGE_SIZE;
+ dma_addr += NVME_CTRL_PAGE_SIZE;
+ length -= NVME_CTRL_PAGE_SIZE;
+ if (length <= 0)
+ break;
+ if (dma_len > 0)
+ continue;
+ if (unlikely(dma_len < 0))
+ goto bad_sgl;
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+done:
+ cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
+ return BLK_STS_OK;
+free_prps:
+ apple_nvme_free_prps(anv, req);
+ return BLK_STS_RESOURCE;
+bad_sgl:
+ WARN(DO_ONCE(apple_nvme_print_sgl, iod->sg, iod->nents),
+ "Invalid SGL for payload:%d nents:%d\n", blk_rq_payload_bytes(req),
+ iod->nents);
+ return BLK_STS_IOERR;
+}
+
+static blk_status_t apple_nvme_setup_prp_simple(struct apple_nvme *anv,
+ struct request *req,
+ struct nvme_rw_command *cmnd,
+ struct bio_vec *bv)
+{
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
+ unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;
+
+ iod->first_dma = dma_map_bvec(anv->dev, bv, rq_dma_dir(req), 0);
+ if (dma_mapping_error(anv->dev, iod->first_dma))
+ return BLK_STS_RESOURCE;
+ iod->dma_len = bv->bv_len;
+
+ cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
+ if (bv->bv_len > first_prp_len)
+ cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
+ return BLK_STS_OK;
+}
+
+static blk_status_t apple_nvme_map_data(struct apple_nvme *anv,
+ struct request *req,
+ struct nvme_command *cmnd)
+{
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ blk_status_t ret = BLK_STS_RESOURCE;
+ int nr_mapped;
+
+ if (blk_rq_nr_phys_segments(req) == 1) {
+ struct bio_vec bv = req_bvec(req);
+
+ if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
+ return apple_nvme_setup_prp_simple(anv, req, &cmnd->rw,
+ &bv);
+ }
+
+ iod->dma_len = 0;
+ iod->sg = mempool_alloc(anv->iod_mempool, GFP_ATOMIC);
+ if (!iod->sg)
+ return BLK_STS_RESOURCE;
+ sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
+ iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
+ if (!iod->nents)
+ goto out_free_sg;
+
+ nr_mapped = dma_map_sg_attrs(anv->dev, iod->sg, iod->nents,
+ rq_dma_dir(req), DMA_ATTR_NO_WARN);
+ if (!nr_mapped)
+ goto out_free_sg;
+
+ ret = apple_nvme_setup_prps(anv, req, &cmnd->rw);
+ if (ret != BLK_STS_OK)
+ goto out_unmap_sg;
+ return BLK_STS_OK;
+
+out_unmap_sg:
+ dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
+out_free_sg:
+ mempool_free(iod->sg, anv->iod_mempool);
+ return ret;
+}
+
+static __always_inline void apple_nvme_unmap_rq(struct request *req)
+{
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct apple_nvme *anv = queue_to_apple_nvme(iod->q);
+
+ if (blk_rq_nr_phys_segments(req))
+ apple_nvme_unmap_data(anv, req);
+}
+
+static void apple_nvme_complete_rq(struct request *req)
+{
+ apple_nvme_unmap_rq(req);
+ nvme_complete_rq(req);
+}
+
+static void apple_nvme_complete_batch(struct io_comp_batch *iob)
+{
+ nvme_complete_batch(iob, apple_nvme_unmap_rq);
+}
+
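+/*
+ * A completion entry is new when its phase tag bit matches the phase
+ * expected for the current pass through the completion queue.
+ */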
+static inline bool apple_nvme_cqe_pending(struct apple_nvme_queue *q)
+{
+ struct nvme_completion *hcqe = &q->cqes[q->cq_head];
+
+ return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == q->cq_phase;
+}
+
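+/* both tagsets are created with a single hardware queue, hence tags[0] */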
+static inline struct blk_mq_tags *
+apple_nvme_queue_tagset(struct apple_nvme *anv, struct apple_nvme_queue *q)
+{
+ if (q->is_adminq)
+ return anv->admin_tagset.tags[0];
+ else
+ return anv->tagset.tags[0];
+}
+
+static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
+ struct io_comp_batch *iob, u16 idx)
+{
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+ struct nvme_completion *cqe = &q->cqes[idx];
+ __u16 command_id = READ_ONCE(cqe->command_id);
+ struct request *req;
+
+ apple_nvmmu_inval(q, command_id);
+
+ req = nvme_find_rq(apple_nvme_queue_tagset(anv, q), command_id);
+ if (unlikely(!req)) {
+ dev_warn(anv->dev, "invalid id %d completed", command_id);
+ return;
+ }
+
+ if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
+ !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
+ apple_nvme_complete_batch))
+ apple_nvme_complete_rq(req);
+}
+
+static inline void apple_nvme_update_cq_head(struct apple_nvme_queue *q)
+{
+ u32 tmp = q->cq_head + 1;
+
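+	/* wrapping around inverts the phase expected of new completions */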
+ if (tmp == apple_nvme_queue_depth(q)) {
+ q->cq_head = 0;
+ q->cq_phase ^= 1;
+ } else {
+ q->cq_head = tmp;
+ }
+}
+
+static bool apple_nvme_poll_cq(struct apple_nvme_queue *q,
+ struct io_comp_batch *iob)
+{
+ bool found = false;
+
+ while (apple_nvme_cqe_pending(q)) {
+ found = true;
+
+ /*
+ * load-load control dependency between phase and the rest of
+ * the cqe requires a full read memory barrier
+ */
+ dma_rmb();
+ apple_nvme_handle_cqe(q, iob, q->cq_head);
+ apple_nvme_update_cq_head(q);
+ }
+
+ if (found)
+ writel(q->cq_head, q->cq_db);
+
+ return found;
+}
+
+static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force)
+{
+ bool found;
+ DEFINE_IO_COMP_BATCH(iob);
+
+ if (!READ_ONCE(q->enabled) && !force)
+ return false;
+
+ found = apple_nvme_poll_cq(q, &iob);
+
+ if (!rq_list_empty(iob.req_list))
+ apple_nvme_complete_batch(&iob);
+
+ return found;
+}
+
+static irqreturn_t apple_nvme_irq(int irq, void *data)
+{
+ struct apple_nvme *anv = data;
+ bool handled = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&anv->lock, flags);
+ if (apple_nvme_handle_cq(&anv->ioq, false))
+ handled = true;
+ if (apple_nvme_handle_cq(&anv->adminq, false))
+ handled = true;
+ spin_unlock_irqrestore(&anv->lock, flags);
+
+ if (handled)
+ return IRQ_HANDLED;
+ return IRQ_NONE;
+}
+
+static int apple_nvme_create_cq(struct apple_nvme *anv)
+{
+ struct nvme_command c = {};
+
+ /*
+ * Note: we (ab)use the fact that the prp fields survive if no data
+ * is attached to the request.
+ */
+ c.create_cq.opcode = nvme_admin_create_cq;
+ c.create_cq.prp1 = cpu_to_le64(anv->ioq.cq_dma_addr);
+ c.create_cq.cqid = cpu_to_le16(1);
+ c.create_cq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1);
+ c.create_cq.cq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED);
+ c.create_cq.irq_vector = cpu_to_le16(0);
+
+ return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
+}
+
+static int apple_nvme_remove_cq(struct apple_nvme *anv)
+{
+ struct nvme_command c = {};
+
+ c.delete_queue.opcode = nvme_admin_delete_cq;
+ c.delete_queue.qid = cpu_to_le16(1);
+
+ return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
+}
+
+static int apple_nvme_create_sq(struct apple_nvme *anv)
+{
+ struct nvme_command c = {};
+
+ /*
+ * Note: we (ab)use the fact that the prp fields survive if no data
+ * is attached to the request.
+ */
+ c.create_sq.opcode = nvme_admin_create_sq;
+ c.create_sq.prp1 = cpu_to_le64(anv->ioq.sq_dma_addr);
+ c.create_sq.sqid = cpu_to_le16(1);
+ c.create_sq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1);
+ c.create_sq.sq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG);
+ c.create_sq.cqid = cpu_to_le16(1);
+
+ return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
+}
+
+static int apple_nvme_remove_sq(struct apple_nvme *anv)
+{
+ struct nvme_command c = {};
+
+ c.delete_queue.opcode = nvme_admin_delete_sq;
+ c.delete_queue.qid = cpu_to_le16(1);
+
+ return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
+}
+
+static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct nvme_ns *ns = hctx->queue->queuedata;
+ struct apple_nvme_queue *q = hctx->driver_data;
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+ struct request *req = bd->rq;
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_command *cmnd = &iod->cmd;
+ blk_status_t ret;
+
+ iod->npages = -1;
+ iod->nents = 0;
+
+ /*
+ * We should not need to do this, but we're still using this to
+ * ensure we can drain requests on a dying queue.
+ */
+ if (unlikely(!READ_ONCE(q->enabled)))
+ return BLK_STS_IOERR;
+
+ if (!nvme_check_ready(&anv->ctrl, req, true))
+ return nvme_fail_nonready_command(&anv->ctrl, req);
+
+ ret = nvme_setup_cmd(ns, req);
+ if (ret)
+ return ret;
+
+ if (blk_rq_nr_phys_segments(req)) {
+ ret = apple_nvme_map_data(anv, req, cmnd);
+ if (ret)
+ goto out_free_cmd;
+ }
+
+ blk_mq_start_request(req);
+ apple_nvme_submit_cmd(q, cmnd);
+ return BLK_STS_OK;
+
+out_free_cmd:
+ nvme_cleanup_cmd(req);
+ return ret;
+}
+
+static int apple_nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ hctx->driver_data = data;
+ return 0;
+}
+
+static int apple_nvme_init_request(struct blk_mq_tag_set *set,
+ struct request *req, unsigned int hctx_idx,
+ unsigned int numa_node)
+{
+ struct apple_nvme_queue *q = set->driver_data;
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_request *nreq = nvme_req(req);
+
+ iod->q = q;
+ nreq->ctrl = &anv->ctrl;
+ nreq->cmd = &iod->cmd;
+
+ return 0;
+}
+
+static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
+{
+ u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
+ bool dead = false, freeze = false;
+ unsigned long flags;
+
+ if (apple_rtkit_is_crashed(anv->rtk))
+ dead = true;
+ if (!(csts & NVME_CSTS_RDY))
+ dead = true;
+ if (csts & NVME_CSTS_CFS)
+ dead = true;
+
+ if (anv->ctrl.state == NVME_CTRL_LIVE ||
+ anv->ctrl.state == NVME_CTRL_RESETTING) {
+ freeze = true;
+ nvme_start_freeze(&anv->ctrl);
+ }
+
+ /*
+ * Give the controller a chance to complete all entered requests if
+ * doing a safe shutdown.
+ */
+ if (!dead && shutdown && freeze)
+ nvme_wait_freeze_timeout(&anv->ctrl, NVME_IO_TIMEOUT);
+
+ nvme_stop_queues(&anv->ctrl);
+
+ if (!dead) {
+ if (READ_ONCE(anv->ioq.enabled)) {
+ apple_nvme_remove_sq(anv);
+ apple_nvme_remove_cq(anv);
+ }
+
+ if (shutdown)
+ nvme_shutdown_ctrl(&anv->ctrl);
+ nvme_disable_ctrl(&anv->ctrl);
+ }
+
+ WRITE_ONCE(anv->ioq.enabled, false);
+ WRITE_ONCE(anv->adminq.enabled, false);
+ mb(); /* ensure that nvme_queue_rq() sees that enabled is cleared */
+ nvme_stop_admin_queue(&anv->ctrl);
+
+ /* last chance to complete any requests before nvme_cancel_request */
+ spin_lock_irqsave(&anv->lock, flags);
+ apple_nvme_handle_cq(&anv->ioq, true);
+ apple_nvme_handle_cq(&anv->adminq, true);
+ spin_unlock_irqrestore(&anv->lock, flags);
+
+ blk_mq_tagset_busy_iter(&anv->tagset, nvme_cancel_request, &anv->ctrl);
+ blk_mq_tagset_busy_iter(&anv->admin_tagset, nvme_cancel_request,
+ &anv->ctrl);
+ blk_mq_tagset_wait_completed_request(&anv->tagset);
+ blk_mq_tagset_wait_completed_request(&anv->admin_tagset);
+
+ /*
+ * The driver will not be starting up queues again if shutting down so
+ * must flush all entered requests to their failed completion to avoid
+	 * deadlocking the blk-mq hot-cpu notifier.
+ */
+ if (shutdown) {
+ nvme_start_queues(&anv->ctrl);
+ nvme_start_admin_queue(&anv->ctrl);
+ }
+}
+
+static enum blk_eh_timer_return apple_nvme_timeout(struct request *req,
+ bool reserved)
+{
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct apple_nvme_queue *q = iod->q;
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+ unsigned long flags;
+ u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
+
+ if (anv->ctrl.state != NVME_CTRL_LIVE) {
+ /*
+ * From rdma.c:
+ * If we are resetting, connecting or deleting we should
+ * complete immediately because we may block controller
+ * teardown or setup sequence
+ * - ctrl disable/shutdown fabrics requests
+ * - connect requests
+ * - initialization admin requests
+ * - I/O requests that entered after unquiescing and
+ * the controller stopped responding
+ *
+ * All other requests should be cancelled by the error
+ * recovery work, so it's fine that we fail it here.
+ */
+ dev_warn(anv->dev,
+ "I/O %d(aq:%d) timeout while not in live state\n",
+ req->tag, q->is_adminq);
+ if (blk_mq_request_started(req) &&
+ !blk_mq_request_completed(req)) {
+ nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
+ nvme_req(req)->flags |= NVME_REQ_CANCELLED;
+ blk_mq_complete_request(req);
+ }
+ return BLK_EH_DONE;
+ }
+
+ /* check if we just missed an interrupt if we're still alive */
+ if (!apple_rtkit_is_crashed(anv->rtk) && !(csts & NVME_CSTS_CFS)) {
+ spin_lock_irqsave(&anv->lock, flags);
+ apple_nvme_handle_cq(q, false);
+ spin_unlock_irqrestore(&anv->lock, flags);
+ if (blk_mq_request_completed(req)) {
+ dev_warn(anv->dev,
+ "I/O %d(aq:%d) timeout: completion polled\n",
+ req->tag, q->is_adminq);
+ return BLK_EH_DONE;
+ }
+ }
+
+ /*
+ * aborting commands isn't supported which leaves a full reset as our
+ * only option here
+ */
+ dev_warn(anv->dev, "I/O %d(aq:%d) timeout: resetting controller\n",
+ req->tag, q->is_adminq);
+ nvme_req(req)->flags |= NVME_REQ_CANCELLED;
+ apple_nvme_disable(anv, false);
+ nvme_reset_ctrl(&anv->ctrl);
+ return BLK_EH_DONE;
+}
+
+static int apple_nvme_poll(struct blk_mq_hw_ctx *hctx,
+ struct io_comp_batch *iob)
+{
+ struct apple_nvme_queue *q = hctx->driver_data;
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+ bool found;
+ unsigned long flags;
+
+ spin_lock_irqsave(&anv->lock, flags);
+ found = apple_nvme_poll_cq(q, iob);
+ spin_unlock_irqrestore(&anv->lock, flags);
+
+ return found;
+}
+
+static const struct blk_mq_ops apple_nvme_mq_admin_ops = {
+ .queue_rq = apple_nvme_queue_rq,
+ .complete = apple_nvme_complete_rq,
+ .init_hctx = apple_nvme_init_hctx,
+ .init_request = apple_nvme_init_request,
+ .timeout = apple_nvme_timeout,
+};
+
+static const struct blk_mq_ops apple_nvme_mq_ops = {
+ .queue_rq = apple_nvme_queue_rq,
+ .complete = apple_nvme_complete_rq,
+ .init_hctx = apple_nvme_init_hctx,
+ .init_request = apple_nvme_init_request,
+ .timeout = apple_nvme_timeout,
+ .poll = apple_nvme_poll,
+};
+
+static void apple_nvme_init_queue(struct apple_nvme_queue *q)
+{
+ unsigned int depth = apple_nvme_queue_depth(q);
+
+ q->cq_head = 0;
+ q->cq_phase = 1;
+ memset(q->tcbs, 0,
+ APPLE_ANS_MAX_QUEUE_DEPTH * sizeof(struct apple_nvmmu_tcb));
+ memset(q->cqes, 0, depth * sizeof(struct nvme_completion));
+ WRITE_ONCE(q->enabled, true);
+ wmb(); /* ensure the first interrupt sees the initialization */
+}
+
+static void apple_nvme_reset_work(struct work_struct *work)
+{
+ unsigned int nr_io_queues = 1;
+ int ret;
+ u32 boot_status, aqa;
+ struct apple_nvme *anv =
+ container_of(work, struct apple_nvme, ctrl.reset_work);
+
+ if (anv->ctrl.state != NVME_CTRL_RESETTING) {
+ dev_warn(anv->dev, "ctrl state %d is not RESETTING\n",
+ anv->ctrl.state);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* there's unfortunately no known way to recover if RTKit crashed :( */
+ if (apple_rtkit_is_crashed(anv->rtk)) {
+ dev_err(anv->dev,
+ "RTKit has crashed without any way to recover.");
+ ret = -EIO;
+ goto out;
+ }
+
+ if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
+ apple_nvme_disable(anv, false);
+
+ /* RTKit must be shut down cleanly for the (soft)-reset to work */
+ if (apple_rtkit_is_running(anv->rtk)) {
+ dev_dbg(anv->dev, "Trying to shut down RTKit before reset.");
+ ret = apple_rtkit_shutdown(anv->rtk);
+ if (ret)
+ goto out;
+ }
+
+ writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+
+ ret = reset_control_assert(anv->reset);
+ if (ret)
+ goto out;
+
+ ret = apple_rtkit_reinit(anv->rtk);
+ if (ret)
+ goto out;
+
+ ret = reset_control_deassert(anv->reset);
+ if (ret)
+ goto out;
+
+ writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN,
+ anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+ ret = apple_rtkit_boot(anv->rtk);
+ if (ret) {
+ dev_err(anv->dev, "ANS did not boot");
+ goto out;
+ }
+
+ ret = readl_poll_timeout(anv->mmio_nvme + APPLE_ANS_BOOT_STATUS,
+ boot_status,
+ boot_status == APPLE_ANS_BOOT_STATUS_OK,
+ USEC_PER_MSEC, APPLE_ANS_BOOT_TIMEOUT);
+ if (ret) {
+ dev_err(anv->dev, "ANS did not initialize");
+ goto out;
+ }
+
+ dev_dbg(anv->dev, "ANS booted successfully.");
+
+ /*
+ * Limit the max command size to prevent iod->sg allocations going
+ * over a single page.
+ */
+ anv->ctrl.max_hw_sectors = min_t(u32, NVME_MAX_KB_SZ << 1,
+ dma_max_mapping_size(anv->dev) >> 9);
+ anv->ctrl.max_segments = NVME_MAX_SEGS;
+
+ /*
+ * Enable NVMMU and linear submission queues.
+	 * While we could keep those disabled and pretend this is a slightly
+	 * more common NVMe controller, we'd still need some quirks (e.g.
+ * sq entries will be 128 bytes) and Apple might drop support for
+ * that mode in the future.
+ */
+ writel(APPLE_ANS_LINEAR_SQ_EN,
+ anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL);
+
+	/* Allow as many pending commands as possible for both queues */
+ writel(APPLE_ANS_MAX_QUEUE_DEPTH | (APPLE_ANS_MAX_QUEUE_DEPTH << 16),
+ anv->mmio_nvme + APPLE_ANS_MAX_PEND_CMDS_CTRL);
+
+ /* Setup the NVMMU for the maximum admin and IO queue depth */
+ writel(APPLE_ANS_MAX_QUEUE_DEPTH - 1,
+ anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS);
+
+ /*
+ * This is probably a chicken bit: without it all commands where any PRP
+ * is set to zero (including those that don't use that field) fail and
+ * the co-processor complains about "completed with err BAD_CMD-" or
+ * a "NULL_PRP_PTR_ERR" in the syslog
+ */
+ writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) &
+ ~APPLE_ANS_PRP_NULL_CHECK,
+ anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL);
+
+ /* Setup the admin queue */
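+	/* NVME_REG_AQA takes the 0-based admin SQ and CQ sizes in its two halves */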
+ aqa = APPLE_NVME_AQ_DEPTH - 1;
+ aqa |= aqa << 16;
+ writel(aqa, anv->mmio_nvme + NVME_REG_AQA);
+ writeq(anv->adminq.sq_dma_addr, anv->mmio_nvme + NVME_REG_ASQ);
+ writeq(anv->adminq.cq_dma_addr, anv->mmio_nvme + NVME_REG_ACQ);
+
+ /* Setup NVMMU for both queues */
+ writeq(anv->adminq.tcb_dma_addr,
+ anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE);
+ writeq(anv->ioq.tcb_dma_addr,
+ anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE);
+
+ anv->ctrl.sqsize =
+ APPLE_ANS_MAX_QUEUE_DEPTH - 1; /* 0's based queue depth */
+ anv->ctrl.cap = readq(anv->mmio_nvme + NVME_REG_CAP);
+
+ dev_dbg(anv->dev, "Enabling controller now");
+ ret = nvme_enable_ctrl(&anv->ctrl);
+ if (ret)
+ goto out;
+
+ dev_dbg(anv->dev, "Starting admin queue");
+ apple_nvme_init_queue(&anv->adminq);
+ nvme_start_admin_queue(&anv->ctrl);
+
+ if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_CONNECTING)) {
+ dev_warn(anv->ctrl.device,
+ "failed to mark controller CONNECTING\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = nvme_init_ctrl_finish(&anv->ctrl);
+ if (ret)
+ goto out;
+
+ dev_dbg(anv->dev, "Creating IOCQ");
+ ret = apple_nvme_create_cq(anv);
+ if (ret)
+ goto out;
+ dev_dbg(anv->dev, "Creating IOSQ");
+ ret = apple_nvme_create_sq(anv);
+ if (ret)
+ goto out_remove_cq;
+
+ apple_nvme_init_queue(&anv->ioq);
+ nr_io_queues = 1;
+ ret = nvme_set_queue_count(&anv->ctrl, &nr_io_queues);
+ if (ret)
+ goto out_remove_sq;
+ if (nr_io_queues != 1) {
+ ret = -ENXIO;
+ goto out_remove_sq;
+ }
+
+ anv->ctrl.queue_count = nr_io_queues + 1;
+
+ nvme_start_queues(&anv->ctrl);
+ nvme_wait_freeze(&anv->ctrl);
+ blk_mq_update_nr_hw_queues(&anv->tagset, 1);
+ nvme_unfreeze(&anv->ctrl);
+
+ if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_LIVE)) {
+ dev_warn(anv->ctrl.device,
+ "failed to mark controller live state\n");
+ ret = -ENODEV;
+ goto out_remove_sq;
+ }
+
+ nvme_start_ctrl(&anv->ctrl);
+
+ dev_dbg(anv->dev, "ANS boot and NVMe init completed.");
+ return;
+
+out_remove_sq:
+ apple_nvme_remove_sq(anv);
+out_remove_cq:
+ apple_nvme_remove_cq(anv);
+out:
+ dev_warn(anv->ctrl.device, "Reset failure status: %d\n", ret);
+ nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING);
+ nvme_get_ctrl(&anv->ctrl);
+ apple_nvme_disable(anv, false);
+ nvme_kill_queues(&anv->ctrl);
+ if (!queue_work(nvme_wq, &anv->remove_work))
+ nvme_put_ctrl(&anv->ctrl);
+}
+
+static void apple_nvme_remove_dead_ctrl_work(struct work_struct *work)
+{
+ struct apple_nvme *anv =
+ container_of(work, struct apple_nvme, remove_work);
+
+ nvme_put_ctrl(&anv->ctrl);
+ device_release_driver(anv->dev);
+}
+
+static int apple_nvme_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
+{
+ *val = readl(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
+ return 0;
+}
+
+static int apple_nvme_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
+{
+ writel(val, ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
+ return 0;
+}
+
+static int apple_nvme_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
+{
+ *val = readq(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
+ return 0;
+}
+
+static int apple_nvme_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
+{
+ struct device *dev = ctrl_to_apple_nvme(ctrl)->dev;
+
+ return snprintf(buf, size, "%s\n", dev_name(dev));
+}
+
+static void apple_nvme_free_ctrl(struct nvme_ctrl *ctrl)
+{
+ struct apple_nvme *anv = ctrl_to_apple_nvme(ctrl);
+
+ if (anv->ctrl.admin_q)
+ blk_put_queue(anv->ctrl.admin_q);
+ put_device(anv->dev);
+}
+
+static const struct nvme_ctrl_ops nvme_ctrl_ops = {
+ .name = "apple-nvme",
+ .module = THIS_MODULE,
+ .flags = 0,
+ .reg_read32 = apple_nvme_reg_read32,
+ .reg_write32 = apple_nvme_reg_write32,
+ .reg_read64 = apple_nvme_reg_read64,
+ .free_ctrl = apple_nvme_free_ctrl,
+ .get_address = apple_nvme_get_address,
+};
+
+static void apple_nvme_async_probe(void *data, async_cookie_t cookie)
+{
+ struct apple_nvme *anv = data;
+
+ flush_work(&anv->ctrl.reset_work);
+ flush_work(&anv->ctrl.scan_work);
+ nvme_put_ctrl(&anv->ctrl);
+}
+
+static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
+{
+ int ret;
+
+ anv->admin_tagset.ops = &apple_nvme_mq_admin_ops;
+ anv->admin_tagset.nr_hw_queues = 1;
+ anv->admin_tagset.queue_depth = APPLE_NVME_AQ_MQ_TAG_DEPTH;
+ anv->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
+ anv->admin_tagset.numa_node = NUMA_NO_NODE;
+ anv->admin_tagset.cmd_size = sizeof(struct apple_nvme_iod);
+ anv->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
+ anv->admin_tagset.driver_data = &anv->adminq;
+
+ ret = blk_mq_alloc_tag_set(&anv->admin_tagset);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(anv->dev,
+ (void (*)(void *))blk_mq_free_tag_set,
+ &anv->admin_tagset);
+ if (ret)
+ return ret;
+
+ anv->tagset.ops = &apple_nvme_mq_ops;
+ anv->tagset.nr_hw_queues = 1;
+ anv->tagset.nr_maps = 1;
+ /*
+ * Tags are used as an index to the NVMMU and must be unique across
+ * both queues. The admin queue gets the first APPLE_NVME_AQ_DEPTH which
+ * must be marked as reserved in the IO queue.
+ */
+ anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH;
+ anv->tagset.queue_depth = APPLE_ANS_MAX_QUEUE_DEPTH - 1;
+ anv->tagset.timeout = NVME_IO_TIMEOUT;
+ anv->tagset.numa_node = NUMA_NO_NODE;
+ anv->tagset.cmd_size = sizeof(struct apple_nvme_iod);
+ anv->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
+ anv->tagset.driver_data = &anv->ioq;
+
+ ret = blk_mq_alloc_tag_set(&anv->tagset);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(
+ anv->dev, (void (*)(void *))blk_mq_free_tag_set, &anv->tagset);
+ if (ret)
+ return ret;
+
+ anv->ctrl.admin_tagset = &anv->admin_tagset;
+ anv->ctrl.tagset = &anv->tagset;
+
+ return 0;
+}
+
+static int apple_nvme_queue_alloc(struct apple_nvme *anv,
+ struct apple_nvme_queue *q)
+{
+ unsigned int depth = apple_nvme_queue_depth(q);
+
+ q->cqes = dmam_alloc_coherent(anv->dev,
+ depth * sizeof(struct nvme_completion),
+ &q->cq_dma_addr, GFP_KERNEL);
+ if (!q->cqes)
+ return -ENOMEM;
+
+ q->sqes = dmam_alloc_coherent(anv->dev,
+ depth * sizeof(struct nvme_command),
+ &q->sq_dma_addr, GFP_KERNEL);
+ if (!q->sqes)
+ return -ENOMEM;
+
+ /*
+ * We need the maximum queue depth here because the NVMMU only has a
+ * single depth configuration shared between both queues.
+ */
+ q->tcbs = dmam_alloc_coherent(anv->dev,
+ APPLE_ANS_MAX_QUEUE_DEPTH *
+ sizeof(struct apple_nvmmu_tcb),
+ &q->tcb_dma_addr, GFP_KERNEL);
+ if (!q->tcbs)
+ return -ENOMEM;
+
+ /*
+	 * Initialize the phase to make sure the allocated and empty memory
+ * doesn't look like a full cq already.
+ */
+ q->cq_phase = 1;
+ return 0;
+}
+
+static void apple_nvme_detach_genpd(struct apple_nvme *anv)
+{
+ int i;
+
+ if (anv->pd_count <= 1)
+ return;
+
+ for (i = anv->pd_count - 1; i >= 0; i--) {
+ if (anv->pd_link[i])
+ device_link_del(anv->pd_link[i]);
+ if (!IS_ERR_OR_NULL(anv->pd_dev[i]))
+ dev_pm_domain_detach(anv->pd_dev[i], true);
+ }
+}
+
+static int apple_nvme_attach_genpd(struct apple_nvme *anv)
+{
+ struct device *dev = anv->dev;
+ int i;
+
+ anv->pd_count = of_count_phandle_with_args(
+ dev->of_node, "power-domains", "#power-domain-cells");
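+	/* a single power domain is attached automatically by the core */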
+ if (anv->pd_count <= 1)
+ return 0;
+
+ anv->pd_dev = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_dev),
+ GFP_KERNEL);
+ if (!anv->pd_dev)
+ return -ENOMEM;
+
+ anv->pd_link = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_link),
+ GFP_KERNEL);
+ if (!anv->pd_link)
+ return -ENOMEM;
+
+ for (i = 0; i < anv->pd_count; i++) {
+ anv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
+ if (IS_ERR(anv->pd_dev[i])) {
+ apple_nvme_detach_genpd(anv);
+ return PTR_ERR(anv->pd_dev[i]);
+ }
+
+ anv->pd_link[i] = device_link_add(dev, anv->pd_dev[i],
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (!anv->pd_link[i]) {
+ apple_nvme_detach_genpd(anv);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int apple_nvme_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct apple_nvme *anv;
+ int ret;
+
+ anv = devm_kzalloc(dev, sizeof(*anv), GFP_KERNEL);
+ if (!anv)
+ return -ENOMEM;
+
+ anv->dev = get_device(dev);
+ anv->adminq.is_adminq = true;
+ platform_set_drvdata(pdev, anv);
+
+ ret = apple_nvme_attach_genpd(anv);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to attach power domains");
+ goto put_dev;
+ }
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
+ ret = -ENXIO;
+ goto put_dev;
+ }
+
+ anv->irq = platform_get_irq(pdev, 0);
+ if (anv->irq < 0) {
+ ret = anv->irq;
+ goto put_dev;
+ }
+ if (!anv->irq) {
+ ret = -ENXIO;
+ goto put_dev;
+ }
+
+ anv->mmio_coproc = devm_platform_ioremap_resource_byname(pdev, "ans");
+ if (IS_ERR(anv->mmio_coproc)) {
+ ret = PTR_ERR(anv->mmio_coproc);
+ goto put_dev;
+ }
+ anv->mmio_nvme = devm_platform_ioremap_resource_byname(pdev, "nvme");
+ if (IS_ERR(anv->mmio_nvme)) {
+ ret = PTR_ERR(anv->mmio_nvme);
+ goto put_dev;
+ }
+
+ anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB;
+ anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB;
+ anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB;
+ anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB;
+
+ anv->sart = devm_apple_sart_get(dev);
+ if (IS_ERR(anv->sart)) {
+ ret = dev_err_probe(dev, PTR_ERR(anv->sart),
+ "Failed to initialize SART");
+ goto put_dev;
+ }
+
+ anv->reset = devm_reset_control_array_get_exclusive(anv->dev);
+ if (IS_ERR(anv->reset)) {
+ ret = dev_err_probe(dev, PTR_ERR(anv->reset),
+ "Failed to get reset control");
+ goto put_dev;
+ }
+
+ INIT_WORK(&anv->ctrl.reset_work, apple_nvme_reset_work);
+ INIT_WORK(&anv->remove_work, apple_nvme_remove_dead_ctrl_work);
+ spin_lock_init(&anv->lock);
+
+ ret = apple_nvme_queue_alloc(anv, &anv->adminq);
+ if (ret)
+ goto put_dev;
+ ret = apple_nvme_queue_alloc(anv, &anv->ioq);
+ if (ret)
+ goto put_dev;
+
+ anv->prp_page_pool = dmam_pool_create("prp list page", anv->dev,
+ NVME_CTRL_PAGE_SIZE,
+ NVME_CTRL_PAGE_SIZE, 0);
+ if (!anv->prp_page_pool) {
+ ret = -ENOMEM;
+ goto put_dev;
+ }
+
+ anv->prp_small_pool =
+ dmam_pool_create("prp list 256", anv->dev, 256, 256, 0);
+ if (!anv->prp_small_pool) {
+ ret = -ENOMEM;
+ goto put_dev;
+ }
+
+ WARN_ON_ONCE(apple_nvme_iod_alloc_size() > PAGE_SIZE);
+ anv->iod_mempool =
+ mempool_create_kmalloc_pool(1, apple_nvme_iod_alloc_size());
+ if (!anv->iod_mempool) {
+ ret = -ENOMEM;
+ goto put_dev;
+ }
+ ret = devm_add_action_or_reset(
+ anv->dev, (void (*)(void *))mempool_destroy, anv->iod_mempool);
+ if (ret)
+ goto put_dev;
+
+ ret = apple_nvme_alloc_tagsets(anv);
+ if (ret)
+ goto put_dev;
+
+ ret = devm_request_irq(anv->dev, anv->irq, apple_nvme_irq, 0,
+ "nvme-apple", anv);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to request IRQ");
+ goto put_dev;
+ }
+
+ anv->rtk =
+ devm_apple_rtkit_init(dev, anv, NULL, 0, &apple_nvme_rtkit_ops);
+ if (IS_ERR(anv->rtk)) {
+ ret = dev_err_probe(dev, PTR_ERR(anv->rtk),
+ "Failed to initialize RTKit");
+ goto put_dev;
+ }
+
+ ret = nvme_init_ctrl(&anv->ctrl, anv->dev, &nvme_ctrl_ops,
+ NVME_QUIRK_SKIP_CID_GEN);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to initialize nvme_ctrl");
+ goto put_dev;
+ }
+
+ anv->ctrl.admin_q = blk_mq_init_queue(&anv->admin_tagset);
+ if (IS_ERR(anv->ctrl.admin_q)) {
+ ret = -ENOMEM;
+ goto put_dev;
+ }
+
+ if (!blk_get_queue(anv->ctrl.admin_q)) {
+ nvme_start_admin_queue(&anv->ctrl);
+ blk_cleanup_queue(anv->ctrl.admin_q);
+ anv->ctrl.admin_q = NULL;
+ ret = -ENODEV;
+ goto put_dev;
+ }
+
+ nvme_reset_ctrl(&anv->ctrl);
+ async_schedule(apple_nvme_async_probe, anv);
+
+ return 0;
+
+put_dev:
+ put_device(anv->dev);
+ return ret;
+}
+
+static int apple_nvme_remove(struct platform_device *pdev)
+{
+ struct apple_nvme *anv = platform_get_drvdata(pdev);
+
+ nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING);
+ flush_work(&anv->ctrl.reset_work);
+ nvme_stop_ctrl(&anv->ctrl);
+ nvme_remove_namespaces(&anv->ctrl);
+ apple_nvme_disable(anv, true);
+ nvme_uninit_ctrl(&anv->ctrl);
+
+ if (apple_rtkit_is_running(anv->rtk))
+ apple_rtkit_shutdown(anv->rtk);
+
+ apple_nvme_detach_genpd(anv);
+
+ return 0;
+}
+
+static void apple_nvme_shutdown(struct platform_device *pdev)
+{
+ struct apple_nvme *anv = platform_get_drvdata(pdev);
+
+ apple_nvme_disable(anv, true);
+ if (apple_rtkit_is_running(anv->rtk))
+ apple_rtkit_shutdown(anv->rtk);
+}
+
+static int apple_nvme_resume(struct device *dev)
+{
+ struct apple_nvme *anv = dev_get_drvdata(dev);
+
+ return nvme_reset_ctrl(&anv->ctrl);
+}
+
+static int apple_nvme_suspend(struct device *dev)
+{
+ struct apple_nvme *anv = dev_get_drvdata(dev);
+ int ret = 0;
+
+ apple_nvme_disable(anv, true);
+
+ if (apple_rtkit_is_running(anv->rtk))
+ ret = apple_rtkit_shutdown(anv->rtk);
+
+ writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+
+ return ret;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(apple_nvme_pm_ops, apple_nvme_suspend,
+ apple_nvme_resume);
+
+static const struct of_device_id apple_nvme_of_match[] = {
+ { .compatible = "apple,nvme-ans2" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, apple_nvme_of_match);
+
+static struct platform_driver apple_nvme_driver = {
+ .driver = {
+ .name = "nvme-apple",
+ .of_match_table = apple_nvme_of_match,
+ .pm = pm_sleep_ptr(&apple_nvme_pm_ops),
+ },
+ .probe = apple_nvme_probe,
+ .remove = apple_nvme_remove,
+ .shutdown = apple_nvme_shutdown,
+};
+module_platform_driver(apple_nvme_driver);
+
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index b496028b6bfa..93c8d07ee328 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -183,7 +183,7 @@ config RESET_RASPBERRYPI
config RESET_RZG2L_USBPHY_CTRL
tristate "Renesas RZ/G2L USBPHY control driver"
- depends on ARCH_R9A07G044 || COMPILE_TEST
+ depends on ARCH_RZG2L || COMPILE_TEST
help
Support for USBPHY Control found on RZ/G2L family. It mainly
controls reset and power down of the USB/PHY.
@@ -240,7 +240,7 @@ config RESET_SUNXI
config RESET_TI_SCI
tristate "TI System Control Interface (TI-SCI) reset driver"
- depends on TI_SCI_PROTOCOL
+ depends on TI_SCI_PROTOCOL || COMPILE_TEST
help
This enables the reset driver support over TI System Control Interface
available on some new TI's SoCs. If you wish to use reset resources
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 61e688882643..f0a076e94118 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -12,6 +12,7 @@
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/acpi.h>
#include <linux/reset.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
@@ -1100,13 +1101,25 @@ EXPORT_SYMBOL_GPL(__devm_reset_control_bulk_get);
*
* Convenience wrapper for __reset_control_get() and reset_control_reset().
* This is useful for the common case of devices with single, dedicated reset
- * lines.
+ * lines. The _RST firmware method will be called for devices with ACPI.
*/
int __device_reset(struct device *dev, bool optional)
{
struct reset_control *rstc;
int ret;
+#ifdef CONFIG_ACPI
+ acpi_handle handle = ACPI_HANDLE(dev);
+
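+	/* on ACPI platforms use the firmware's _RST method when present */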
+ if (handle) {
+ if (!acpi_has_method(handle, "_RST"))
+ return optional ? 0 : -ENOENT;
+ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_RST", NULL,
+ NULL)))
+ return -EIO;
+ }
+#endif
+
rstc = __reset_control_get(dev, NULL, 0, 0, optional, true);
if (IS_ERR(rstc))
return PTR_ERR(rstc);
diff --git a/drivers/reset/reset-meson.c b/drivers/reset/reset-meson.c
index c9bc325ad65a..26dc54778615 100644
--- a/drivers/reset/reset-meson.c
+++ b/drivers/reset/reset-meson.c
@@ -98,11 +98,17 @@ static const struct meson_reset_param meson_a1_param = {
.level_offset = 0x40,
};
+static const struct meson_reset_param meson_s4_param = {
+ .reg_count = 6,
+ .level_offset = 0x40,
+};
+
static const struct of_device_id meson_reset_dt_ids[] = {
{ .compatible = "amlogic,meson8b-reset", .data = &meson8b_param},
{ .compatible = "amlogic,meson-gxbb-reset", .data = &meson8b_param},
{ .compatible = "amlogic,meson-axg-reset", .data = &meson8b_param},
{ .compatible = "amlogic,meson-a1-reset", .data = &meson_a1_param},
+ { .compatible = "amlogic,meson-s4-reset", .data = &meson_s4_param},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, meson_reset_dt_ids);
diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c
index 4dda0daf2c6f..361a68314265 100644
--- a/drivers/reset/reset-simple.c
+++ b/drivers/reset/reset-simple.c
@@ -144,6 +144,7 @@ static const struct of_device_id reset_simple_dt_ids[] = {
.data = &reset_simple_active_low },
{ .compatible = "aspeed,ast2400-lpc-reset" },
{ .compatible = "aspeed,ast2500-lpc-reset" },
+ { .compatible = "aspeed,ast2600-lpc-reset" },
{ .compatible = "bitmain,bm1880-reset",
.data = &reset_simple_active_low },
{ .compatible = "brcm,bcm4908-misc-pcie-reset",
diff --git a/drivers/reset/reset-uniphier-glue.c b/drivers/reset/reset-uniphier-glue.c
index 908c1d5bc41e..146fd5d45e99 100644
--- a/drivers/reset/reset-uniphier-glue.c
+++ b/drivers/reset/reset-uniphier-glue.c
@@ -23,19 +23,32 @@ struct uniphier_glue_reset_soc_data {
struct uniphier_glue_reset_priv {
struct clk_bulk_data clk[MAX_CLKS];
- struct reset_control *rst[MAX_RSTS];
+ struct reset_control_bulk_data rst[MAX_RSTS];
struct reset_simple_data rdata;
const struct uniphier_glue_reset_soc_data *data;
};
+static void uniphier_clk_disable(void *_priv)
+{
+ struct uniphier_glue_reset_priv *priv = _priv;
+
+ clk_bulk_disable_unprepare(priv->data->nclks, priv->clk);
+}
+
+static void uniphier_rst_assert(void *_priv)
+{
+ struct uniphier_glue_reset_priv *priv = _priv;
+
+ reset_control_bulk_assert(priv->data->nrsts, priv->rst);
+}
+
static int uniphier_glue_reset_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct uniphier_glue_reset_priv *priv;
struct resource *res;
resource_size_t size;
- const char *name;
- int i, ret, nr;
+ int i, ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -58,22 +71,28 @@ static int uniphier_glue_reset_probe(struct platform_device *pdev)
if (ret)
return ret;
- for (i = 0; i < priv->data->nrsts; i++) {
- name = priv->data->reset_names[i];
- priv->rst[i] = devm_reset_control_get_shared(dev, name);
- if (IS_ERR(priv->rst[i]))
- return PTR_ERR(priv->rst[i]);
- }
+ for (i = 0; i < priv->data->nrsts; i++)
+ priv->rst[i].id = priv->data->reset_names[i];
+ ret = devm_reset_control_bulk_get_shared(dev, priv->data->nrsts,
+ priv->rst);
+ if (ret)
+ return ret;
ret = clk_bulk_prepare_enable(priv->data->nclks, priv->clk);
if (ret)
return ret;
- for (nr = 0; nr < priv->data->nrsts; nr++) {
- ret = reset_control_deassert(priv->rst[nr]);
- if (ret)
- goto out_rst_assert;
- }
+ ret = devm_add_action_or_reset(dev, uniphier_clk_disable, priv);
+ if (ret)
+ return ret;
+
+ ret = reset_control_bulk_deassert(priv->data->nrsts, priv->rst);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, uniphier_rst_assert, priv);
+ if (ret)
+ return ret;
spin_lock_init(&priv->rdata.lock);
priv->rdata.rcdev.owner = THIS_MODULE;
@@ -84,32 +103,7 @@ static int uniphier_glue_reset_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- ret = devm_reset_controller_register(dev, &priv->rdata.rcdev);
- if (ret)
- goto out_rst_assert;
-
- return 0;
-
-out_rst_assert:
- while (nr--)
- reset_control_assert(priv->rst[nr]);
-
- clk_bulk_disable_unprepare(priv->data->nclks, priv->clk);
-
- return ret;
-}
-
-static int uniphier_glue_reset_remove(struct platform_device *pdev)
-{
- struct uniphier_glue_reset_priv *priv = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < priv->data->nrsts; i++)
- reset_control_assert(priv->rst[i]);
-
- clk_bulk_disable_unprepare(priv->data->nclks, priv->clk);
-
- return 0;
+ return devm_reset_controller_register(dev, &priv->rdata.rcdev);
}
static const char * const uniphier_pro4_clock_reset_names[] = {
@@ -177,7 +171,6 @@ MODULE_DEVICE_TABLE(of, uniphier_glue_reset_match);
static struct platform_driver uniphier_glue_reset_driver = {
.probe = uniphier_glue_reset_probe,
- .remove = uniphier_glue_reset_remove,
.driver = {
.name = "uniphier-glue-reset",
.of_match_table = uniphier_glue_reset_match,
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 904eec2a7871..e8228c4e5d18 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -4,7 +4,7 @@
#
obj-$(CONFIG_ARCH_ACTIONS) += actions/
-obj-$(CONFIG_ARCH_APPLE) += apple/
+obj-y += apple/
obj-y += aspeed/
obj-$(CONFIG_ARCH_AT91) += atmel/
obj-y += bcm/
@@ -22,7 +22,7 @@ obj-y += microchip/
obj-y += amlogic/
obj-y += qcom/
obj-y += renesas/
-obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
+obj-y += rockchip/
obj-$(CONFIG_SOC_SAMSUNG) += samsung/
obj-$(CONFIG_SOC_SIFIVE) += sifive/
obj-y += sunxi/
diff --git a/drivers/soc/apple/Kconfig b/drivers/soc/apple/Kconfig
index 9b8de31d6a8f..a1596fefacff 100644
--- a/drivers/soc/apple/Kconfig
+++ b/drivers/soc/apple/Kconfig
@@ -17,6 +17,30 @@ config APPLE_PMGR_PWRSTATE
controls for SoC devices. This driver manages them through the
generic power domain framework, and also provides reset support.
+config APPLE_RTKIT
+ tristate "Apple RTKit co-processor IPC protocol"
+ depends on MAILBOX
+ depends on ARCH_APPLE || COMPILE_TEST
+ default ARCH_APPLE
+ help
+ Apple SoCs such as the M1 come with various co-processors running
+ their proprietary RTKit operating system. This option enables support
+ for the protocol library used to communicate with those. It is used
+ by various client drivers.
+
+ Say 'y' here if you have an Apple SoC.
+
+config APPLE_SART
+ tristate "Apple SART DMA address filter"
+ depends on ARCH_APPLE || COMPILE_TEST
+ default ARCH_APPLE
+ help
+ Apple SART is a simple DMA address filter used on Apple SoCs such
+ as the M1. It is usually required for the NVMe coprocessor which does
+ not use a proper IOMMU.
+
+ Say 'y' here if you have an Apple SoC.
+
endmenu
endif
diff --git a/drivers/soc/apple/Makefile b/drivers/soc/apple/Makefile
index c114e84667e4..e293770cf66d 100644
--- a/drivers/soc/apple/Makefile
+++ b/drivers/soc/apple/Makefile
@@ -1,2 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_APPLE_PMGR_PWRSTATE) += apple-pmgr-pwrstate.o
+
+obj-$(CONFIG_APPLE_RTKIT) += apple-rtkit.o
+apple-rtkit-y = rtkit.o rtkit-crashlog.o
+
+obj-$(CONFIG_APPLE_SART) += apple-sart.o
+apple-sart-y = sart.o
diff --git a/drivers/soc/apple/rtkit-crashlog.c b/drivers/soc/apple/rtkit-crashlog.c
new file mode 100644
index 000000000000..732deed64660
--- /dev/null
+++ b/drivers/soc/apple/rtkit-crashlog.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple RTKit IPC library
+ * Copyright (C) The Asahi Linux Contributors
+ */
+#include "rtkit-internal.h"
+
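+/* pack four characters into a u32 tag, first character in the top byte */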
+#define FOURCC(a, b, c, d) \
+ (((u32)(a) << 24) | ((u32)(b) << 16) | ((u32)(c) << 8) | ((u32)(d)))
+
+#define APPLE_RTKIT_CRASHLOG_HEADER FOURCC('C', 'L', 'H', 'E')
+#define APPLE_RTKIT_CRASHLOG_STR FOURCC('C', 's', 't', 'r')
+#define APPLE_RTKIT_CRASHLOG_VERSION FOURCC('C', 'v', 'e', 'r')
+#define APPLE_RTKIT_CRASHLOG_MBOX FOURCC('C', 'm', 'b', 'x')
+#define APPLE_RTKIT_CRASHLOG_TIME FOURCC('C', 't', 'i', 'm')
+
+struct apple_rtkit_crashlog_header {
+ u32 fourcc;
+ u32 version;
+ u32 size;
+ u32 flags;
+ u8 _unk[16];
+};
+static_assert(sizeof(struct apple_rtkit_crashlog_header) == 0x20);
+
+struct apple_rtkit_crashlog_mbox_entry {
+ u64 msg0;
+ u64 msg1;
+ u32 timestamp;
+ u8 _unk[4];
+};
+static_assert(sizeof(struct apple_rtkit_crashlog_mbox_entry) == 0x18);
+
+static void apple_rtkit_crashlog_dump_str(struct apple_rtkit *rtk, u8 *bfr,
+ size_t size)
+{
+ u32 idx;
+ u8 *ptr, *end;
+
+ memcpy(&idx, bfr, 4);
+
+ ptr = bfr + 4;
+ end = bfr + size;
+ while (ptr < end) {
+ u8 *newline = memchr(ptr, '\n', end - ptr);
+
+ if (newline) {
+ u8 tmp = *newline;
+ *newline = '\0';
+ dev_warn(rtk->dev, "RTKit: Message (id=%x): %s\n", idx,
+ ptr);
+ *newline = tmp;
+ ptr = newline + 1;
+ } else {
+ dev_warn(rtk->dev, "RTKit: Message (id=%x): %s", idx,
+ ptr);
+ break;
+ }
+ }
+}
+
+static void apple_rtkit_crashlog_dump_version(struct apple_rtkit *rtk, u8 *bfr,
+ size_t size)
+{
+ dev_warn(rtk->dev, "RTKit: Version: %s", bfr + 16);
+}
+
+static void apple_rtkit_crashlog_dump_time(struct apple_rtkit *rtk, u8 *bfr,
+ size_t size)
+{
+ u64 crash_time;
+
+ memcpy(&crash_time, bfr, 8);
+ dev_warn(rtk->dev, "RTKit: Crash time: %lld", crash_time);
+}
+
+static void apple_rtkit_crashlog_dump_mailbox(struct apple_rtkit *rtk, u8 *bfr,
+ size_t size)
+{
+ u32 type, index, i;
+ size_t n_messages;
+ struct apple_rtkit_crashlog_mbox_entry entry;
+
+ memcpy(&type, bfr + 16, 4);
+ memcpy(&index, bfr + 24, 4);
+ n_messages = (size - 28) / sizeof(entry);
+
+ dev_warn(rtk->dev, "RTKit: Mailbox history (type = %d, index = %d)",
+ type, index);
+ for (i = 0; i < n_messages; ++i) {
+ memcpy(&entry, bfr + 28 + i * sizeof(entry), sizeof(entry));
+ dev_warn(rtk->dev, "RTKit: #%03d@%08x: %016llx %016llx", i,
+ entry.timestamp, entry.msg0, entry.msg1);
+ }
+}
+
+void apple_rtkit_crashlog_dump(struct apple_rtkit *rtk, u8 *bfr, size_t size)
+{
+ size_t offset;
+ u32 section_fourcc, section_size;
+ struct apple_rtkit_crashlog_header header;
+
+ memcpy(&header, bfr, sizeof(header));
+ if (header.fourcc != APPLE_RTKIT_CRASHLOG_HEADER) {
+ dev_warn(rtk->dev, "RTKit: Expected crashlog header but got %x",
+ header.fourcc);
+ return;
+ }
+
+ if (header.size > size) {
+ dev_warn(rtk->dev, "RTKit: Crashlog size (%x) is too large",
+ header.size);
+ return;
+ }
+
+ size = header.size;
+ offset = sizeof(header);
+
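+	/*
+	 * Each section starts with a fourcc tag, keeps its total size
+	 * (section header included, since the parser advances by it) at
+	 * offset 12 and its payload from offset 16 onwards.
+	 */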
+ while (offset < size) {
+ memcpy(&section_fourcc, bfr + offset, 4);
+ memcpy(&section_size, bfr + offset + 12, 4);
+
+ switch (section_fourcc) {
+ case APPLE_RTKIT_CRASHLOG_HEADER:
+ dev_dbg(rtk->dev, "RTKit: End of crashlog reached");
+ return;
+ case APPLE_RTKIT_CRASHLOG_STR:
+ apple_rtkit_crashlog_dump_str(rtk, bfr + offset + 16,
+ section_size);
+ break;
+ case APPLE_RTKIT_CRASHLOG_VERSION:
+ apple_rtkit_crashlog_dump_version(
+ rtk, bfr + offset + 16, section_size);
+ break;
+ case APPLE_RTKIT_CRASHLOG_MBOX:
+ apple_rtkit_crashlog_dump_mailbox(
+ rtk, bfr + offset + 16, section_size);
+ break;
+ case APPLE_RTKIT_CRASHLOG_TIME:
+ apple_rtkit_crashlog_dump_time(rtk, bfr + offset + 16,
+ section_size);
+ break;
+ default:
+ dev_warn(rtk->dev,
+ "RTKit: Unknown crashlog section: %x",
+ section_fourcc);
+ }
+
+ offset += section_size;
+ }
+
+ dev_warn(rtk->dev,
+ "RTKit: End of crashlog reached but no footer present");
+}
diff --git a/drivers/soc/apple/rtkit-internal.h b/drivers/soc/apple/rtkit-internal.h
new file mode 100644
index 000000000000..24bd619ec5e4
--- /dev/null
+++ b/drivers/soc/apple/rtkit-internal.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple RTKit IPC library
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#ifndef _APPLE_RTKIT_INTERNAL_H
+#define _APPLE_RTKIT_INTERNAL_H
+
+#include <linux/apple-mailbox.h>
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/soc/apple/rtkit.h>
+#include <linux/workqueue.h>
+
+#define APPLE_RTKIT_APP_ENDPOINT_START 0x20
+#define APPLE_RTKIT_MAX_ENDPOINTS 0x100
+
+struct apple_rtkit {
+ void *cookie;
+ const struct apple_rtkit_ops *ops;
+ struct device *dev;
+
+ const char *mbox_name;
+ int mbox_idx;
+ struct mbox_client mbox_cl;
+ struct mbox_chan *mbox_chan;
+
+ struct completion epmap_completion;
+ struct completion iop_pwr_ack_completion;
+ struct completion ap_pwr_ack_completion;
+
+ int boot_result;
+ int version;
+
+ unsigned int iop_power_state;
+ unsigned int ap_power_state;
+ bool crashed;
+
+ DECLARE_BITMAP(endpoints, APPLE_RTKIT_MAX_ENDPOINTS);
+
+ struct apple_rtkit_shmem ioreport_buffer;
+ struct apple_rtkit_shmem crashlog_buffer;
+
+ struct apple_rtkit_shmem syslog_buffer;
+ char *syslog_msg_buffer;
+ size_t syslog_n_entries;
+ size_t syslog_msg_size;
+
+ struct workqueue_struct *wq;
+};
+
+void apple_rtkit_crashlog_dump(struct apple_rtkit *rtk, u8 *bfr, size_t size);
+
+#endif
diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c
new file mode 100644
index 000000000000..cf1129e9f76b
--- /dev/null
+++ b/drivers/soc/apple/rtkit.c
@@ -0,0 +1,958 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple RTKit IPC library
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include "rtkit-internal.h"
+
+enum {
+ APPLE_RTKIT_PWR_STATE_OFF = 0x00, /* power off, cannot be restarted */
+ APPLE_RTKIT_PWR_STATE_SLEEP = 0x01, /* sleeping, can be restarted */
+ APPLE_RTKIT_PWR_STATE_QUIESCED = 0x10, /* running but no communication */
+ APPLE_RTKIT_PWR_STATE_ON = 0x20, /* normal operating state */
+};
+
+enum {
+ APPLE_RTKIT_EP_MGMT = 0,
+ APPLE_RTKIT_EP_CRASHLOG = 1,
+ APPLE_RTKIT_EP_SYSLOG = 2,
+ APPLE_RTKIT_EP_DEBUG = 3,
+ APPLE_RTKIT_EP_IOREPORT = 4,
+ APPLE_RTKIT_EP_OSLOG = 8,
+};
+
+#define APPLE_RTKIT_MGMT_TYPE GENMASK_ULL(59, 52)
+
+enum {
+ APPLE_RTKIT_MGMT_HELLO = 1,
+ APPLE_RTKIT_MGMT_HELLO_REPLY = 2,
+ APPLE_RTKIT_MGMT_STARTEP = 5,
+ APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE = 6,
+ APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE_ACK = 7,
+ APPLE_RTKIT_MGMT_EPMAP = 8,
+ APPLE_RTKIT_MGMT_EPMAP_REPLY = 8,
+ APPLE_RTKIT_MGMT_SET_AP_PWR_STATE = 0xb,
+ APPLE_RTKIT_MGMT_SET_AP_PWR_STATE_ACK = 0xb,
+};
+
+#define APPLE_RTKIT_MGMT_HELLO_MINVER GENMASK_ULL(15, 0)
+#define APPLE_RTKIT_MGMT_HELLO_MAXVER GENMASK_ULL(31, 16)
+
+#define APPLE_RTKIT_MGMT_EPMAP_LAST BIT_ULL(51)
+#define APPLE_RTKIT_MGMT_EPMAP_BASE GENMASK_ULL(34, 32)
+#define APPLE_RTKIT_MGMT_EPMAP_BITMAP GENMASK_ULL(31, 0)
+
+#define APPLE_RTKIT_MGMT_EPMAP_REPLY_MORE BIT_ULL(0)
+
+#define APPLE_RTKIT_MGMT_STARTEP_EP GENMASK_ULL(39, 32)
+#define APPLE_RTKIT_MGMT_STARTEP_FLAG BIT_ULL(1)
+
+#define APPLE_RTKIT_MGMT_PWR_STATE GENMASK_ULL(15, 0)
+
+#define APPLE_RTKIT_CRASHLOG_CRASH 1
+
+#define APPLE_RTKIT_BUFFER_REQUEST 1
+#define APPLE_RTKIT_BUFFER_REQUEST_SIZE GENMASK_ULL(51, 44)
+#define APPLE_RTKIT_BUFFER_REQUEST_IOVA GENMASK_ULL(41, 0)
+
+#define APPLE_RTKIT_SYSLOG_TYPE GENMASK_ULL(59, 52)
+
+#define APPLE_RTKIT_SYSLOG_LOG 5
+
+#define APPLE_RTKIT_SYSLOG_INIT 8
+#define APPLE_RTKIT_SYSLOG_N_ENTRIES GENMASK_ULL(7, 0)
+#define APPLE_RTKIT_SYSLOG_MSG_SIZE GENMASK_ULL(31, 24)
+
+#define APPLE_RTKIT_OSLOG_TYPE GENMASK_ULL(63, 56)
+#define APPLE_RTKIT_OSLOG_INIT 1
+#define APPLE_RTKIT_OSLOG_ACK 3
+
+#define APPLE_RTKIT_MIN_SUPPORTED_VERSION 11
+#define APPLE_RTKIT_MAX_SUPPORTED_VERSION 12
+
+struct apple_rtkit_msg {
+ struct completion *completion;
+ struct apple_mbox_msg mbox_msg;
+};
+
+struct apple_rtkit_rx_work {
+ struct apple_rtkit *rtk;
+ u8 ep;
+ u64 msg;
+ struct work_struct work;
+};
+
+bool apple_rtkit_is_running(struct apple_rtkit *rtk)
+{
+ if (rtk->crashed)
+ return false;
+ if ((rtk->iop_power_state & 0xff) != APPLE_RTKIT_PWR_STATE_ON)
+ return false;
+ if ((rtk->ap_power_state & 0xff) != APPLE_RTKIT_PWR_STATE_ON)
+ return false;
+ return true;
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_is_running);
+
+bool apple_rtkit_is_crashed(struct apple_rtkit *rtk)
+{
+ return rtk->crashed;
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_is_crashed);
+
+static void apple_rtkit_management_send(struct apple_rtkit *rtk, u8 type,
+ u64 msg)
+{
+ msg &= ~APPLE_RTKIT_MGMT_TYPE;
+ msg |= FIELD_PREP(APPLE_RTKIT_MGMT_TYPE, type);
+ apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_MGMT, msg, NULL, false);
+}
+
+static void apple_rtkit_management_rx_hello(struct apple_rtkit *rtk, u64 msg)
+{
+ u64 reply;
+
+ int min_ver = FIELD_GET(APPLE_RTKIT_MGMT_HELLO_MINVER, msg);
+ int max_ver = FIELD_GET(APPLE_RTKIT_MGMT_HELLO_MAXVER, msg);
+ int want_ver = min(APPLE_RTKIT_MAX_SUPPORTED_VERSION, max_ver);
+
+ dev_dbg(rtk->dev, "RTKit: Min ver %d, max ver %d\n", min_ver, max_ver);
+
+ if (min_ver > APPLE_RTKIT_MAX_SUPPORTED_VERSION) {
+ dev_err(rtk->dev, "RTKit: Firmware min version %d is too new\n",
+ min_ver);
+ goto abort_boot;
+ }
+
+ if (max_ver < APPLE_RTKIT_MIN_SUPPORTED_VERSION) {
+ dev_err(rtk->dev, "RTKit: Firmware max version %d is too old\n",
+ max_ver);
+ goto abort_boot;
+ }
+
+ dev_info(rtk->dev, "RTKit: Initializing (protocol version %d)\n",
+ want_ver);
+ rtk->version = want_ver;
+
+ reply = FIELD_PREP(APPLE_RTKIT_MGMT_HELLO_MINVER, want_ver);
+ reply |= FIELD_PREP(APPLE_RTKIT_MGMT_HELLO_MAXVER, want_ver);
+ apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_HELLO_REPLY, reply);
+
+ return;
+
+abort_boot:
+ rtk->boot_result = -EINVAL;
+ complete_all(&rtk->epmap_completion);
+}
+
+static void apple_rtkit_management_rx_epmap(struct apple_rtkit *rtk, u64 msg)
+{
+ int i, ep;
+ u64 reply;
+ unsigned long bitmap = FIELD_GET(APPLE_RTKIT_MGMT_EPMAP_BITMAP, msg);
+ u32 base = FIELD_GET(APPLE_RTKIT_MGMT_EPMAP_BASE, msg);
+
+ dev_dbg(rtk->dev,
+ "RTKit: received endpoint bitmap 0x%lx with base 0x%x\n",
+ bitmap, base);
+
+ for_each_set_bit(i, &bitmap, 32) {
+ ep = 32 * base + i;
+ dev_dbg(rtk->dev, "RTKit: Discovered endpoint 0x%02x\n", ep);
+ set_bit(ep, rtk->endpoints);
+ }
+
+ reply = FIELD_PREP(APPLE_RTKIT_MGMT_EPMAP_BASE, base);
+ if (msg & APPLE_RTKIT_MGMT_EPMAP_LAST)
+ reply |= APPLE_RTKIT_MGMT_EPMAP_LAST;
+ else
+ reply |= APPLE_RTKIT_MGMT_EPMAP_REPLY_MORE;
+
+ apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_EPMAP_REPLY, reply);
+
+ if (!(msg & APPLE_RTKIT_MGMT_EPMAP_LAST))
+ return;
+
+ for_each_set_bit(ep, rtk->endpoints, APPLE_RTKIT_APP_ENDPOINT_START) {
+ switch (ep) {
+ /* the management endpoint is started by default */
+ case APPLE_RTKIT_EP_MGMT:
+ break;
+
+ /* without starting these RTKit refuses to boot */
+ case APPLE_RTKIT_EP_SYSLOG:
+ case APPLE_RTKIT_EP_CRASHLOG:
+ case APPLE_RTKIT_EP_DEBUG:
+ case APPLE_RTKIT_EP_IOREPORT:
+ case APPLE_RTKIT_EP_OSLOG:
+ dev_dbg(rtk->dev,
+ "RTKit: Starting system endpoint 0x%02x\n", ep);
+ apple_rtkit_start_ep(rtk, ep);
+ break;
+
+ default:
+ dev_warn(rtk->dev,
+ "RTKit: Unknown system endpoint: 0x%02x\n",
+ ep);
+ }
+ }
+
+ rtk->boot_result = 0;
+ complete_all(&rtk->epmap_completion);
+}
+
+static void apple_rtkit_management_rx_iop_pwr_ack(struct apple_rtkit *rtk,
+ u64 msg)
+{
+ unsigned int new_state = FIELD_GET(APPLE_RTKIT_MGMT_PWR_STATE, msg);
+
+ dev_dbg(rtk->dev, "RTKit: IOP power state transition: 0x%x -> 0x%x\n",
+ rtk->iop_power_state, new_state);
+ rtk->iop_power_state = new_state;
+
+ complete_all(&rtk->iop_pwr_ack_completion);
+}
+
+static void apple_rtkit_management_rx_ap_pwr_ack(struct apple_rtkit *rtk,
+ u64 msg)
+{
+ unsigned int new_state = FIELD_GET(APPLE_RTKIT_MGMT_PWR_STATE, msg);
+
+ dev_dbg(rtk->dev, "RTKit: AP power state transition: 0x%x -> 0x%x\n",
+ rtk->ap_power_state, new_state);
+ rtk->ap_power_state = new_state;
+
+ complete_all(&rtk->ap_pwr_ack_completion);
+}
+
+static void apple_rtkit_management_rx(struct apple_rtkit *rtk, u64 msg)
+{
+ u8 type = FIELD_GET(APPLE_RTKIT_MGMT_TYPE, msg);
+
+ switch (type) {
+ case APPLE_RTKIT_MGMT_HELLO:
+ apple_rtkit_management_rx_hello(rtk, msg);
+ break;
+ case APPLE_RTKIT_MGMT_EPMAP:
+ apple_rtkit_management_rx_epmap(rtk, msg);
+ break;
+ case APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE_ACK:
+ apple_rtkit_management_rx_iop_pwr_ack(rtk, msg);
+ break;
+ case APPLE_RTKIT_MGMT_SET_AP_PWR_STATE_ACK:
+ apple_rtkit_management_rx_ap_pwr_ack(rtk, msg);
+ break;
+ default:
+ dev_warn(
+ rtk->dev,
+ "RTKit: unknown management message: 0x%llx (type: 0x%02x)\n",
+ msg, type);
+ }
+}
+
+static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk,
+ struct apple_rtkit_shmem *buffer,
+ u8 ep, u64 msg)
+{
+ size_t n_4kpages = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg);
+ u64 reply;
+ int err;
+
+ buffer->buffer = NULL;
+ buffer->iomem = NULL;
+ buffer->is_mapped = false;
+ buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg);
+ buffer->size = n_4kpages << 12;
+
+ dev_dbg(rtk->dev, "RTKit: buffer request for 0x%zx bytes at %pad\n",
+ buffer->size, &buffer->iova);
+
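+	/*
+	 * If the co-processor requests a buffer at a fixed IOVA the client
+	 * driver has to map it for us; otherwise we allocate coherent
+	 * memory here and tell the firmware where to find it.
+	 */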
+ if (buffer->iova &&
+ (!rtk->ops->shmem_setup || !rtk->ops->shmem_destroy)) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ if (rtk->ops->shmem_setup) {
+ err = rtk->ops->shmem_setup(rtk->cookie, buffer);
+ if (err)
+ goto error;
+ } else {
+ buffer->buffer = dma_alloc_coherent(rtk->dev, buffer->size,
+ &buffer->iova, GFP_KERNEL);
+ if (!buffer->buffer) {
+ err = -ENOMEM;
+ goto error;
+ }
+ }
+
+ if (!buffer->is_mapped) {
+ reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE,
+ APPLE_RTKIT_BUFFER_REQUEST);
+ reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE, n_4kpages);
+ reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA,
+ buffer->iova);
+ apple_rtkit_send_message(rtk, ep, reply, NULL, false);
+ }
+
+ return 0;
+
+error:
+ buffer->buffer = NULL;
+ buffer->iomem = NULL;
+ buffer->iova = 0;
+ buffer->size = 0;
+ buffer->is_mapped = false;
+ return err;
+}
+
+static void apple_rtkit_free_buffer(struct apple_rtkit *rtk,
+ struct apple_rtkit_shmem *bfr)
+{
+ if (bfr->size == 0)
+ return;
+
+ if (rtk->ops->shmem_destroy)
+ rtk->ops->shmem_destroy(rtk->cookie, bfr);
+ else if (bfr->buffer)
+ dma_free_coherent(rtk->dev, bfr->size, bfr->buffer, bfr->iova);
+
+ bfr->buffer = NULL;
+ bfr->iomem = NULL;
+ bfr->iova = 0;
+ bfr->size = 0;
+ bfr->is_mapped = false;
+}
+
+static void apple_rtkit_memcpy(struct apple_rtkit *rtk, void *dst,
+ struct apple_rtkit_shmem *bfr, size_t offset,
+ size_t len)
+{
+ if (bfr->iomem)
+ memcpy_fromio(dst, bfr->iomem + offset, len);
+ else
+ memcpy(dst, bfr->buffer + offset, len);
+}
+
+static void apple_rtkit_crashlog_rx(struct apple_rtkit *rtk, u64 msg)
+{
+ u8 type = FIELD_GET(APPLE_RTKIT_SYSLOG_TYPE, msg);
+ u8 *bfr;
+
+ if (type != APPLE_RTKIT_CRASHLOG_CRASH) {
+ dev_warn(rtk->dev, "RTKit: Unknown crashlog message: %llx\n",
+ msg);
+ return;
+ }
+
+ if (!rtk->crashlog_buffer.size) {
+ apple_rtkit_common_rx_get_buffer(rtk, &rtk->crashlog_buffer,
+ APPLE_RTKIT_EP_CRASHLOG, msg);
+ return;
+ }
+
+ dev_err(rtk->dev, "RTKit: co-processor has crashed\n");
+
+ /*
+	 * Create a shadow copy here to make sure the co-processor isn't able
+	 * to change the log while we're dumping it. This also ensures
+ * the buffer is in normal memory and not iomem for e.g. the SMC
+ */
+ bfr = kzalloc(rtk->crashlog_buffer.size, GFP_KERNEL);
+ if (bfr) {
+ apple_rtkit_memcpy(rtk, bfr, &rtk->crashlog_buffer, 0,
+ rtk->crashlog_buffer.size);
+ apple_rtkit_crashlog_dump(rtk, bfr, rtk->crashlog_buffer.size);
+ kfree(bfr);
+ } else {
+ dev_err(rtk->dev,
+ "RTKit: Couldn't allocate crashlog shadow buffer\n");
+ }
+
+ rtk->crashed = true;
+ if (rtk->ops->crashed)
+ rtk->ops->crashed(rtk->cookie);
+}
+
+static void apple_rtkit_ioreport_rx(struct apple_rtkit *rtk, u64 msg)
+{
+ u8 type = FIELD_GET(APPLE_RTKIT_SYSLOG_TYPE, msg);
+
+ switch (type) {
+ case APPLE_RTKIT_BUFFER_REQUEST:
+ apple_rtkit_common_rx_get_buffer(rtk, &rtk->ioreport_buffer,
+ APPLE_RTKIT_EP_IOREPORT, msg);
+ break;
+ /* unknown, must be ACKed or the co-processor will hang */
+ case 0x8:
+ case 0xc:
+ apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_IOREPORT, msg,
+ NULL, false);
+ break;
+ default:
+ dev_warn(rtk->dev, "RTKit: Unknown ioreport message: %llx\n",
+ msg);
+ }
+}
+
+static void apple_rtkit_syslog_rx_init(struct apple_rtkit *rtk, u64 msg)
+{
+ rtk->syslog_n_entries = FIELD_GET(APPLE_RTKIT_SYSLOG_N_ENTRIES, msg);
+ rtk->syslog_msg_size = FIELD_GET(APPLE_RTKIT_SYSLOG_MSG_SIZE, msg);
+
+ rtk->syslog_msg_buffer = kzalloc(rtk->syslog_msg_size, GFP_KERNEL);
+
+ dev_dbg(rtk->dev,
+ "RTKit: syslog initialized: entries: %zd, msg_size: %zd\n",
+ rtk->syslog_n_entries, rtk->syslog_msg_size);
+}
+
+static void apple_rtkit_syslog_rx_log(struct apple_rtkit *rtk, u64 msg)
+{
+ u8 idx = msg & 0xff;
+ char log_context[24];
+ size_t entry_size = 0x20 + rtk->syslog_msg_size;
+
+ if (!rtk->syslog_msg_buffer) {
+ dev_warn(
+ rtk->dev,
+ "RTKit: received syslog message but no syslog_msg_buffer\n");
+ goto done;
+ }
+ if (!rtk->syslog_buffer.size) {
+ dev_warn(
+ rtk->dev,
+ "RTKit: received syslog message but syslog_buffer.size is zero\n");
+ goto done;
+ }
+ if (!rtk->syslog_buffer.buffer && !rtk->syslog_buffer.iomem) {
+ dev_warn(
+ rtk->dev,
+ "RTKit: received syslog message but no syslog_buffer.buffer or syslog_buffer.iomem\n");
+ goto done;
+ }
+ if (idx > rtk->syslog_n_entries) {
+ dev_warn(rtk->dev, "RTKit: syslog index %d out of range\n",
+ idx);
+ goto done;
+ }
+
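+	/*
+	 * Each slot in the syslog buffer is 8 header bytes followed by a
+	 * 24 byte context string and then the message itself.
+	 */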
+ apple_rtkit_memcpy(rtk, log_context, &rtk->syslog_buffer,
+ idx * entry_size + 8, sizeof(log_context));
+ apple_rtkit_memcpy(rtk, rtk->syslog_msg_buffer, &rtk->syslog_buffer,
+ idx * entry_size + 8 + sizeof(log_context),
+ rtk->syslog_msg_size);
+
+ log_context[sizeof(log_context) - 1] = 0;
+ rtk->syslog_msg_buffer[rtk->syslog_msg_size - 1] = 0;
+ dev_info(rtk->dev, "RTKit: syslog message: %s: %s\n", log_context,
+ rtk->syslog_msg_buffer);
+
+done:
+ apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_SYSLOG, msg, NULL, false);
+}
+
+static void apple_rtkit_syslog_rx(struct apple_rtkit *rtk, u64 msg)
+{
+ u8 type = FIELD_GET(APPLE_RTKIT_SYSLOG_TYPE, msg);
+
+ switch (type) {
+ case APPLE_RTKIT_BUFFER_REQUEST:
+ apple_rtkit_common_rx_get_buffer(rtk, &rtk->syslog_buffer,
+ APPLE_RTKIT_EP_SYSLOG, msg);
+ break;
+ case APPLE_RTKIT_SYSLOG_INIT:
+ apple_rtkit_syslog_rx_init(rtk, msg);
+ break;
+ case APPLE_RTKIT_SYSLOG_LOG:
+ apple_rtkit_syslog_rx_log(rtk, msg);
+ break;
+ default:
+ dev_warn(rtk->dev, "RTKit: Unknown syslog message: %llx\n",
+ msg);
+ }
+}
+
+static void apple_rtkit_oslog_rx_init(struct apple_rtkit *rtk, u64 msg)
+{
+ u64 ack;
+
+ dev_dbg(rtk->dev, "RTKit: oslog init: msg: 0x%llx\n", msg);
+ ack = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE, APPLE_RTKIT_OSLOG_ACK);
+ apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_OSLOG, ack, NULL, false);
+}
+
+static void apple_rtkit_oslog_rx(struct apple_rtkit *rtk, u64 msg)
+{
+ u8 type = FIELD_GET(APPLE_RTKIT_OSLOG_TYPE, msg);
+
+ switch (type) {
+ case APPLE_RTKIT_OSLOG_INIT:
+ apple_rtkit_oslog_rx_init(rtk, msg);
+ break;
+ default:
+ dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n", msg);
+ }
+}
+
+static void apple_rtkit_rx_work(struct work_struct *work)
+{
+ struct apple_rtkit_rx_work *rtk_work =
+ container_of(work, struct apple_rtkit_rx_work, work);
+ struct apple_rtkit *rtk = rtk_work->rtk;
+
+ switch (rtk_work->ep) {
+ case APPLE_RTKIT_EP_MGMT:
+ apple_rtkit_management_rx(rtk, rtk_work->msg);
+ break;
+ case APPLE_RTKIT_EP_CRASHLOG:
+ apple_rtkit_crashlog_rx(rtk, rtk_work->msg);
+ break;
+ case APPLE_RTKIT_EP_SYSLOG:
+ apple_rtkit_syslog_rx(rtk, rtk_work->msg);
+ break;
+ case APPLE_RTKIT_EP_IOREPORT:
+ apple_rtkit_ioreport_rx(rtk, rtk_work->msg);
+ break;
+ case APPLE_RTKIT_EP_OSLOG:
+ apple_rtkit_oslog_rx(rtk, rtk_work->msg);
+ break;
+ case APPLE_RTKIT_APP_ENDPOINT_START ... 0xff:
+ if (rtk->ops->recv_message)
+ rtk->ops->recv_message(rtk->cookie, rtk_work->ep,
+ rtk_work->msg);
+ else
+			dev_warn(rtk->dev,
+				 "RTKit: Received unexpected message to EP%02d: %llx\n",
+				 rtk_work->ep, rtk_work->msg);
+ break;
+ default:
+ dev_warn(rtk->dev,
+ "RTKit: message to unknown endpoint %02x: %llx\n",
+ rtk_work->ep, rtk_work->msg);
+ }
+
+ kfree(rtk_work);
+}
+
+static void apple_rtkit_rx(struct mbox_client *cl, void *mssg)
+{
+ struct apple_rtkit *rtk = container_of(cl, struct apple_rtkit, mbox_cl);
+ struct apple_mbox_msg *msg = mssg;
+ struct apple_rtkit_rx_work *work;
+ u8 ep = msg->msg1;
+
+ /*
+	 * The message was read from an MMIO FIFO and we have to make
+ * sure all reads from buffers sent with that message happen
+ * afterwards.
+ */
+ dma_rmb();
+
+ if (!test_bit(ep, rtk->endpoints))
+ dev_warn(rtk->dev,
+ "RTKit: Message to undiscovered endpoint 0x%02x\n",
+ ep);
+
+ if (ep >= APPLE_RTKIT_APP_ENDPOINT_START &&
+ rtk->ops->recv_message_early &&
+ rtk->ops->recv_message_early(rtk->cookie, ep, msg->msg0))
+ return;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ work->rtk = rtk;
+ work->ep = ep;
+ work->msg = msg->msg0;
+ INIT_WORK(&work->work, apple_rtkit_rx_work);
+ queue_work(rtk->wq, &work->work);
+}
+
+static void apple_rtkit_tx_done(struct mbox_client *cl, void *mssg, int r)
+{
+ struct apple_rtkit_msg *msg =
+ container_of(mssg, struct apple_rtkit_msg, mbox_msg);
+
+ if (r == -ETIME)
+ return;
+
+ if (msg->completion)
+ complete(msg->completion);
+ kfree(msg);
+}
+
+int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
+ struct completion *completion, bool atomic)
+{
+ struct apple_rtkit_msg *msg;
+ int ret;
+ gfp_t flags;
+
+ if (rtk->crashed)
+ return -EINVAL;
+ if (ep >= APPLE_RTKIT_APP_ENDPOINT_START &&
+ !apple_rtkit_is_running(rtk))
+ return -EINVAL;
+
+ if (atomic)
+ flags = GFP_ATOMIC;
+ else
+ flags = GFP_KERNEL;
+
+ msg = kzalloc(sizeof(*msg), flags);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->mbox_msg.msg0 = message;
+ msg->mbox_msg.msg1 = ep;
+ msg->completion = completion;
+
+ /*
+	 * The message will be sent with an MMIO write. We need the barrier
+ * here to ensure any previous writes to buffers are visible to the
+ * device before that MMIO write happens.
+ */
+ dma_wmb();
+
+ ret = mbox_send_message(rtk->mbox_chan, &msg->mbox_msg);
+ if (ret < 0) {
+ kfree(msg);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_send_message);
+
+int apple_rtkit_send_message_wait(struct apple_rtkit *rtk, u8 ep, u64 message,
+ unsigned long timeout, bool atomic)
+{
+ DECLARE_COMPLETION_ONSTACK(completion);
+ int ret;
+ long t;
+
+ ret = apple_rtkit_send_message(rtk, ep, message, &completion, atomic);
+ if (ret < 0)
+ return ret;
+
+ if (atomic) {
+ ret = mbox_flush(rtk->mbox_chan, timeout);
+ if (ret < 0)
+ return ret;
+
+ if (try_wait_for_completion(&completion))
+ return 0;
+
+ return -ETIME;
+ } else {
+ t = wait_for_completion_interruptible_timeout(
+ &completion, msecs_to_jiffies(timeout));
+ if (t < 0)
+ return t;
+ else if (t == 0)
+ return -ETIME;
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_send_message_wait);
+
+int apple_rtkit_start_ep(struct apple_rtkit *rtk, u8 endpoint)
+{
+ u64 msg;
+
+ if (!test_bit(endpoint, rtk->endpoints))
+ return -EINVAL;
+ if (endpoint >= APPLE_RTKIT_APP_ENDPOINT_START &&
+ !apple_rtkit_is_running(rtk))
+ return -EINVAL;
+
+ msg = FIELD_PREP(APPLE_RTKIT_MGMT_STARTEP_EP, endpoint);
+ msg |= APPLE_RTKIT_MGMT_STARTEP_FLAG;
+ apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_STARTEP, msg);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_start_ep);
+
+static int apple_rtkit_request_mbox_chan(struct apple_rtkit *rtk)
+{
+ if (rtk->mbox_name)
+ rtk->mbox_chan = mbox_request_channel_byname(&rtk->mbox_cl,
+ rtk->mbox_name);
+ else
+ rtk->mbox_chan =
+ mbox_request_channel(&rtk->mbox_cl, rtk->mbox_idx);
+
+ if (IS_ERR(rtk->mbox_chan))
+ return PTR_ERR(rtk->mbox_chan);
+ return 0;
+}
+
+static struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
+ const char *mbox_name, int mbox_idx,
+ const struct apple_rtkit_ops *ops)
+{
+ struct apple_rtkit *rtk;
+ int ret;
+
+ if (!ops)
+ return ERR_PTR(-EINVAL);
+
+ rtk = kzalloc(sizeof(*rtk), GFP_KERNEL);
+ if (!rtk)
+ return ERR_PTR(-ENOMEM);
+
+ rtk->dev = dev;
+ rtk->cookie = cookie;
+ rtk->ops = ops;
+
+ init_completion(&rtk->epmap_completion);
+ init_completion(&rtk->iop_pwr_ack_completion);
+ init_completion(&rtk->ap_pwr_ack_completion);
+
+ bitmap_zero(rtk->endpoints, APPLE_RTKIT_MAX_ENDPOINTS);
+ set_bit(APPLE_RTKIT_EP_MGMT, rtk->endpoints);
+
+ rtk->mbox_name = mbox_name;
+ rtk->mbox_idx = mbox_idx;
+ rtk->mbox_cl.dev = dev;
+ rtk->mbox_cl.tx_block = false;
+ rtk->mbox_cl.knows_txdone = false;
+ rtk->mbox_cl.rx_callback = &apple_rtkit_rx;
+ rtk->mbox_cl.tx_done = &apple_rtkit_tx_done;
+
+ rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_MEM_RECLAIM,
+ dev_name(rtk->dev));
+ if (!rtk->wq) {
+ ret = -ENOMEM;
+ goto free_rtk;
+ }
+
+ ret = apple_rtkit_request_mbox_chan(rtk);
+ if (ret)
+ goto destroy_wq;
+
+ return rtk;
+
+destroy_wq:
+ destroy_workqueue(rtk->wq);
+free_rtk:
+ kfree(rtk);
+ return ERR_PTR(ret);
+}
+
+static int apple_rtkit_wait_for_completion(struct completion *c)
+{
+ long t;
+
+ t = wait_for_completion_interruptible_timeout(c,
+ msecs_to_jiffies(1000));
+ if (t < 0)
+ return t;
+ else if (t == 0)
+ return -ETIME;
+ else
+ return 0;
+}
+
+int apple_rtkit_reinit(struct apple_rtkit *rtk)
+{
+ /* make sure we don't handle any messages while reinitializing */
+ mbox_free_channel(rtk->mbox_chan);
+ flush_workqueue(rtk->wq);
+
+ apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
+ apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
+ apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
+
+ kfree(rtk->syslog_msg_buffer);
+
+ rtk->syslog_msg_buffer = NULL;
+ rtk->syslog_n_entries = 0;
+ rtk->syslog_msg_size = 0;
+
+ bitmap_zero(rtk->endpoints, APPLE_RTKIT_MAX_ENDPOINTS);
+ set_bit(APPLE_RTKIT_EP_MGMT, rtk->endpoints);
+
+ reinit_completion(&rtk->epmap_completion);
+ reinit_completion(&rtk->iop_pwr_ack_completion);
+ reinit_completion(&rtk->ap_pwr_ack_completion);
+
+ rtk->crashed = false;
+ rtk->iop_power_state = APPLE_RTKIT_PWR_STATE_OFF;
+ rtk->ap_power_state = APPLE_RTKIT_PWR_STATE_OFF;
+
+ return apple_rtkit_request_mbox_chan(rtk);
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_reinit);
+
+static int apple_rtkit_set_ap_power_state(struct apple_rtkit *rtk,
+ unsigned int state)
+{
+ u64 msg;
+ int ret;
+
+ reinit_completion(&rtk->ap_pwr_ack_completion);
+
+ msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state);
+ apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_AP_PWR_STATE,
+ msg);
+
+ ret = apple_rtkit_wait_for_completion(&rtk->ap_pwr_ack_completion);
+ if (ret)
+ return ret;
+
+ if (rtk->ap_power_state != state)
+ return -EINVAL;
+ return 0;
+}
+
+static int apple_rtkit_set_iop_power_state(struct apple_rtkit *rtk,
+ unsigned int state)
+{
+ u64 msg;
+ int ret;
+
+ reinit_completion(&rtk->iop_pwr_ack_completion);
+
+ msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state);
+ apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
+ msg);
+
+ ret = apple_rtkit_wait_for_completion(&rtk->iop_pwr_ack_completion);
+ if (ret)
+ return ret;
+
+ if (rtk->iop_power_state != state)
+ return -EINVAL;
+ return 0;
+}
+
+int apple_rtkit_boot(struct apple_rtkit *rtk)
+{
+ int ret;
+
+ if (apple_rtkit_is_running(rtk))
+ return 0;
+ if (rtk->crashed)
+ return -EINVAL;
+
+ dev_dbg(rtk->dev, "RTKit: waiting for boot to finish\n");
+ ret = apple_rtkit_wait_for_completion(&rtk->epmap_completion);
+ if (ret)
+ return ret;
+ if (rtk->boot_result)
+ return rtk->boot_result;
+
+ dev_dbg(rtk->dev, "RTKit: waiting for IOP power state ACK\n");
+ ret = apple_rtkit_wait_for_completion(&rtk->iop_pwr_ack_completion);
+ if (ret)
+ return ret;
+
+ return apple_rtkit_set_ap_power_state(rtk, APPLE_RTKIT_PWR_STATE_ON);
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_boot);
+
+int apple_rtkit_shutdown(struct apple_rtkit *rtk)
+{
+ int ret;
+
+ /* if OFF is used here the co-processor will not wake up again */
+ ret = apple_rtkit_set_ap_power_state(rtk,
+ APPLE_RTKIT_PWR_STATE_QUIESCED);
+ if (ret)
+ return ret;
+
+ ret = apple_rtkit_set_iop_power_state(rtk, APPLE_RTKIT_PWR_STATE_SLEEP);
+ if (ret)
+ return ret;
+
+ return apple_rtkit_reinit(rtk);
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_shutdown);
+
+int apple_rtkit_quiesce(struct apple_rtkit *rtk)
+{
+ int ret;
+
+ ret = apple_rtkit_set_ap_power_state(rtk,
+ APPLE_RTKIT_PWR_STATE_QUIESCED);
+ if (ret)
+ return ret;
+
+ ret = apple_rtkit_set_iop_power_state(rtk,
+ APPLE_RTKIT_PWR_STATE_QUIESCED);
+ if (ret)
+ return ret;
+
+ ret = apple_rtkit_reinit(rtk);
+ if (ret)
+ return ret;
+
+ rtk->iop_power_state = APPLE_RTKIT_PWR_STATE_QUIESCED;
+ rtk->ap_power_state = APPLE_RTKIT_PWR_STATE_QUIESCED;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_quiesce);
+
+int apple_rtkit_wake(struct apple_rtkit *rtk)
+{
+ u64 msg;
+
+ if (apple_rtkit_is_running(rtk))
+ return -EINVAL;
+
+ reinit_completion(&rtk->iop_pwr_ack_completion);
+
+ /*
+	 * Open-code apple_rtkit_set_iop_power_state() here since
+	 * apple_rtkit_boot() will wait for the completion anyway.
+ */
+ msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, APPLE_RTKIT_PWR_STATE_ON);
+ apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
+ msg);
+
+ return apple_rtkit_boot(rtk);
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_wake);
+
+static void apple_rtkit_free(struct apple_rtkit *rtk)
+{
+ mbox_free_channel(rtk->mbox_chan);
+ destroy_workqueue(rtk->wq);
+
+ apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
+ apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
+ apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
+
+ kfree(rtk->syslog_msg_buffer);
+ kfree(rtk);
+}
+
+struct apple_rtkit *devm_apple_rtkit_init(struct device *dev, void *cookie,
+ const char *mbox_name, int mbox_idx,
+ const struct apple_rtkit_ops *ops)
+{
+ struct apple_rtkit *rtk;
+ int ret;
+
+ rtk = apple_rtkit_init(dev, cookie, mbox_name, mbox_idx, ops);
+ if (IS_ERR(rtk))
+ return rtk;
+
+ ret = devm_add_action_or_reset(dev, (void (*)(void *))apple_rtkit_free,
+ rtk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return rtk;
+}
+EXPORT_SYMBOL_GPL(devm_apple_rtkit_init);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_DESCRIPTION("Apple RTKit driver");
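
A minimal sketch of how a consumer driver might use the RTKit API exported above. This is not part of the patch: the mailbox name "mbox", the endpoint number 0x20 (application endpoints start at APPLE_RTKIT_APP_ENDPOINT_START) and the message value are illustrative assumptions.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/soc/apple/rtkit.h>

/* illustrative only: endpoint number and message value are made up */
#define EXAMPLE_EP	0x20

static void example_recv(void *cookie, u8 endpoint, u64 message)
{
	/* called from the RTKit workqueue for messages to started endpoints */
}

static const struct apple_rtkit_ops example_rtkit_ops = {
	.recv_message = example_recv,
};

static int example_probe(struct platform_device *pdev)
{
	struct apple_rtkit *rtk;
	int ret;

	/* the mailbox channel is looked up by name in the device's DT node */
	rtk = devm_apple_rtkit_init(&pdev->dev, NULL, "mbox", 0,
				    &example_rtkit_ops);
	if (IS_ERR(rtk))
		return PTR_ERR(rtk);

	/* waits for the endpoint map and the IOP/AP power-state ACKs */
	ret = apple_rtkit_boot(rtk);
	if (ret)
		return ret;

	ret = apple_rtkit_start_ep(rtk, EXAMPLE_EP);
	if (ret)
		return ret;

	/* fire and forget; pass a completion instead of NULL to be notified */
	return apple_rtkit_send_message(rtk, EXAMPLE_EP, 0x42, NULL, false);
}

Note that apple_rtkit_start_ep() only succeeds for endpoints the co-processor actually advertised in its endpoint map during boot.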
diff --git a/drivers/soc/apple/sart.c b/drivers/soc/apple/sart.c
new file mode 100644
index 000000000000..83804b16ad03
--- /dev/null
+++ b/drivers/soc/apple/sart.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SART device driver
+ * Copyright (C) The Asahi Linux Contributors
+ *
+ * Apple SART is a simple address filter for some DMA transactions.
+ * Regions of physical memory must be added to the SART's allow
+ * list before any DMA can target them. Unlike a proper IOMMU,
+ * no remapping can be done, and special support in the consumer
+ * driver is required since not all DMA transactions of a single
+ * device are subject to SART filtering.
+ */
+
+#include <linux/soc/apple/sart.h>
+#include <linux/atomic.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define APPLE_SART_MAX_ENTRIES 16
+
+/* This is probably a bitfield but the exact meaning of each bit is unknown. */
+#define APPLE_SART_FLAGS_ALLOW 0xff
+
+/* SARTv2 registers */
+#define APPLE_SART2_CONFIG(idx) (0x00 + 4 * (idx))
+#define APPLE_SART2_CONFIG_FLAGS GENMASK(31, 24)
+#define APPLE_SART2_CONFIG_SIZE GENMASK(23, 0)
+#define APPLE_SART2_CONFIG_SIZE_SHIFT 12
+#define APPLE_SART2_CONFIG_SIZE_MAX GENMASK(23, 0)
+
+#define APPLE_SART2_PADDR(idx) (0x40 + 4 * (idx))
+#define APPLE_SART2_PADDR_SHIFT 12
+
+/* SARTv3 registers */
+#define APPLE_SART3_CONFIG(idx) (0x00 + 4 * (idx))
+
+#define APPLE_SART3_PADDR(idx) (0x40 + 4 * (idx))
+#define APPLE_SART3_PADDR_SHIFT 12
+
+#define APPLE_SART3_SIZE(idx) (0x80 + 4 * (idx))
+#define APPLE_SART3_SIZE_SHIFT 12
+#define APPLE_SART3_SIZE_MAX GENMASK(29, 0)
+
+struct apple_sart_ops {
+ void (*get_entry)(struct apple_sart *sart, int index, u8 *flags,
+ phys_addr_t *paddr, size_t *size);
+ void (*set_entry)(struct apple_sart *sart, int index, u8 flags,
+ phys_addr_t paddr_shifted, size_t size_shifted);
+ unsigned int size_shift;
+ unsigned int paddr_shift;
+ size_t size_max;
+};
+
+struct apple_sart {
+ struct device *dev;
+ void __iomem *regs;
+
+ const struct apple_sart_ops *ops;
+
+ unsigned long protected_entries;
+ unsigned long used_entries;
+};
+
+static void sart2_get_entry(struct apple_sart *sart, int index, u8 *flags,
+ phys_addr_t *paddr, size_t *size)
+{
+ u32 cfg = readl(sart->regs + APPLE_SART2_CONFIG(index));
+ phys_addr_t paddr_ = readl(sart->regs + APPLE_SART2_PADDR(index));
+ size_t size_ = FIELD_GET(APPLE_SART2_CONFIG_SIZE, cfg);
+
+ *flags = FIELD_GET(APPLE_SART2_CONFIG_FLAGS, cfg);
+ *size = size_ << APPLE_SART2_CONFIG_SIZE_SHIFT;
+ *paddr = paddr_ << APPLE_SART2_PADDR_SHIFT;
+}
+
+static void sart2_set_entry(struct apple_sart *sart, int index, u8 flags,
+ phys_addr_t paddr_shifted, size_t size_shifted)
+{
+ u32 cfg;
+
+ cfg = FIELD_PREP(APPLE_SART2_CONFIG_FLAGS, flags);
+ cfg |= FIELD_PREP(APPLE_SART2_CONFIG_SIZE, size_shifted);
+
+ writel(paddr_shifted, sart->regs + APPLE_SART2_PADDR(index));
+ writel(cfg, sart->regs + APPLE_SART2_CONFIG(index));
+}
+
+static struct apple_sart_ops sart_ops_v2 = {
+ .get_entry = sart2_get_entry,
+ .set_entry = sart2_set_entry,
+ .size_shift = APPLE_SART2_CONFIG_SIZE_SHIFT,
+ .paddr_shift = APPLE_SART2_PADDR_SHIFT,
+ .size_max = APPLE_SART2_CONFIG_SIZE_MAX,
+};
+
+static void sart3_get_entry(struct apple_sart *sart, int index, u8 *flags,
+ phys_addr_t *paddr, size_t *size)
+{
+ phys_addr_t paddr_ = readl(sart->regs + APPLE_SART3_PADDR(index));
+ size_t size_ = readl(sart->regs + APPLE_SART3_SIZE(index));
+
+ *flags = readl(sart->regs + APPLE_SART3_CONFIG(index));
+ *size = size_ << APPLE_SART3_SIZE_SHIFT;
+ *paddr = paddr_ << APPLE_SART3_PADDR_SHIFT;
+}
+
+static void sart3_set_entry(struct apple_sart *sart, int index, u8 flags,
+ phys_addr_t paddr_shifted, size_t size_shifted)
+{
+ writel(paddr_shifted, sart->regs + APPLE_SART3_PADDR(index));
+ writel(size_shifted, sart->regs + APPLE_SART3_SIZE(index));
+ writel(flags, sart->regs + APPLE_SART3_CONFIG(index));
+}
+
+static struct apple_sart_ops sart_ops_v3 = {
+ .get_entry = sart3_get_entry,
+ .set_entry = sart3_set_entry,
+ .size_shift = APPLE_SART3_SIZE_SHIFT,
+ .paddr_shift = APPLE_SART3_PADDR_SHIFT,
+ .size_max = APPLE_SART3_SIZE_MAX,
+};
+
+static int apple_sart_probe(struct platform_device *pdev)
+{
+ int i;
+ struct apple_sart *sart;
+ struct device *dev = &pdev->dev;
+
+ sart = devm_kzalloc(dev, sizeof(*sart), GFP_KERNEL);
+ if (!sart)
+ return -ENOMEM;
+
+ sart->dev = dev;
+ sart->ops = of_device_get_match_data(dev);
+
+ sart->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(sart->regs))
+ return PTR_ERR(sart->regs);
+
+ for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) {
+ u8 flags;
+ size_t size;
+ phys_addr_t paddr;
+
+ sart->ops->get_entry(sart, i, &flags, &paddr, &size);
+
+ if (!flags)
+ continue;
+
+ dev_dbg(sart->dev,
+ "SART bootloader entry: index %02d; flags: 0x%02x; paddr: %pa; size: 0x%zx\n",
+ i, flags, &paddr, size);
+ set_bit(i, &sart->protected_entries);
+ }
+
+ platform_set_drvdata(pdev, sart);
+ return 0;
+}
+
+struct apple_sart *devm_apple_sart_get(struct device *dev)
+{
+ struct device_node *sart_node;
+ struct platform_device *sart_pdev;
+ struct apple_sart *sart;
+ int ret;
+
+ sart_node = of_parse_phandle(dev->of_node, "apple,sart", 0);
+ if (!sart_node)
+ return ERR_PTR(-ENODEV);
+
+ sart_pdev = of_find_device_by_node(sart_node);
+ of_node_put(sart_node);
+
+ if (!sart_pdev)
+ return ERR_PTR(-ENODEV);
+
+ sart = dev_get_drvdata(&sart_pdev->dev);
+ if (!sart) {
+ put_device(&sart_pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ ret = devm_add_action_or_reset(dev, (void (*)(void *))put_device,
+ &sart_pdev->dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ device_link_add(dev, &sart_pdev->dev,
+ DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
+
+ return sart;
+}
+EXPORT_SYMBOL_GPL(devm_apple_sart_get);
+
+static int sart_set_entry(struct apple_sart *sart, int index, u8 flags,
+ phys_addr_t paddr, size_t size)
+{
+ if (size & ((1 << sart->ops->size_shift) - 1))
+ return -EINVAL;
+ if (paddr & ((1 << sart->ops->paddr_shift) - 1))
+ return -EINVAL;
+
+	paddr >>= sart->ops->paddr_shift;
+	size >>= sart->ops->size_shift;
+
+ if (size > sart->ops->size_max)
+ return -EINVAL;
+
+ sart->ops->set_entry(sart, index, flags, paddr, size);
+ return 0;
+}
+
+int apple_sart_add_allowed_region(struct apple_sart *sart, phys_addr_t paddr,
+ size_t size)
+{
+ int i, ret;
+
+ for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) {
+ if (test_bit(i, &sart->protected_entries))
+ continue;
+ if (test_and_set_bit(i, &sart->used_entries))
+ continue;
+
+ ret = sart_set_entry(sart, i, APPLE_SART_FLAGS_ALLOW, paddr,
+ size);
+ if (ret) {
+ dev_dbg(sart->dev,
+ "unable to set entry %d to [%pa, 0x%zx]\n",
+ i, &paddr, size);
+ clear_bit(i, &sart->used_entries);
+ return ret;
+ }
+
+ dev_dbg(sart->dev, "wrote [%pa, 0x%zx] to %d\n", &paddr, size,
+ i);
+ return 0;
+ }
+
+ dev_warn(sart->dev,
+		 "no free entries left to add [paddr: %pa, size: 0x%zx]\n",
+ &paddr, size);
+
+ return -EBUSY;
+}
+EXPORT_SYMBOL_GPL(apple_sart_add_allowed_region);
+
+int apple_sart_remove_allowed_region(struct apple_sart *sart, phys_addr_t paddr,
+ size_t size)
+{
+ int i;
+
+ dev_dbg(sart->dev,
+ "will remove [paddr: %pa, size: 0x%zx] from allowed regions\n",
+ &paddr, size);
+
+ for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) {
+ u8 eflags;
+ size_t esize;
+ phys_addr_t epaddr;
+
+ if (test_bit(i, &sart->protected_entries))
+ continue;
+
+ sart->ops->get_entry(sart, i, &eflags, &epaddr, &esize);
+
+ if (epaddr != paddr || esize != size)
+ continue;
+
+ sart->ops->set_entry(sart, i, 0, 0, 0);
+
+ clear_bit(i, &sart->used_entries);
+ dev_dbg(sart->dev, "cleared entry %d\n", i);
+ return 0;
+ }
+
+	dev_warn(sart->dev, "entry [paddr: %pa, size: 0x%zx] not found\n",
+ &paddr, size);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(apple_sart_remove_allowed_region);
+
+static void apple_sart_shutdown(struct platform_device *pdev)
+{
+ struct apple_sart *sart = dev_get_drvdata(&pdev->dev);
+ int i;
+
+ for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) {
+ if (test_bit(i, &sart->protected_entries))
+ continue;
+
+ sart->ops->set_entry(sart, i, 0, 0, 0);
+ }
+}
+
+static const struct of_device_id apple_sart_of_match[] = {
+ {
+ .compatible = "apple,t6000-sart",
+ .data = &sart_ops_v3,
+ },
+ {
+ .compatible = "apple,t8103-sart",
+ .data = &sart_ops_v2,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, apple_sart_of_match);
+
+static struct platform_driver apple_sart_driver = {
+ .driver = {
+ .name = "apple-sart",
+ .of_match_table = apple_sart_of_match,
+ },
+ .probe = apple_sart_probe,
+ .shutdown = apple_sart_shutdown,
+};
+module_platform_driver(apple_sart_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_DESCRIPTION("Apple SART driver");
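
Since the SART only filters and never remaps, a consumer pairs it with ordinary DMA allocations. A minimal sketch, not part of the patch, assuming a device whose DT node carries an "apple,sart" phandle and a direct (1:1) DMA mapping so that the DMA address equals the physical address:

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/soc/apple/sart.h>

/* illustrative buffer size; must be a multiple of the 4K SART granule */
#define EXAMPLE_BUF_SIZE	SZ_16K

static int example_sart_dma_setup(struct device *dev)
{
	struct apple_sart *sart;
	dma_addr_t iova;
	void *buf;
	int ret;

	sart = devm_apple_sart_get(dev);
	if (IS_ERR(sart))
		return PTR_ERR(sart);

	buf = dma_alloc_coherent(dev, EXAMPLE_BUF_SIZE, &iova, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * The SART matches physical addresses; with no IOMMU in the path
	 * the DMA address is assumed to equal the physical address here.
	 */
	ret = apple_sart_add_allowed_region(sart, iova, EXAMPLE_BUF_SIZE);
	if (ret)
		dma_free_coherent(dev, EXAMPLE_BUF_SIZE, buf, iova);

	return ret;
}

A matching apple_sart_remove_allowed_region() call with the same address and size releases the entry again.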
diff --git a/drivers/soc/bcm/bcm63xx/bcm-pmb.c b/drivers/soc/bcm/bcm63xx/bcm-pmb.c
index 7bbe46ea5f94..9407cac47fdb 100644
--- a/drivers/soc/bcm/bcm63xx/bcm-pmb.c
+++ b/drivers/soc/bcm/bcm63xx/bcm-pmb.c
@@ -312,6 +312,9 @@ static int bcm_pmb_probe(struct platform_device *pdev)
for (e = table; e->name; e++) {
struct bcm_pmb_pm_domain *pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
pd->pmb = pmb;
pd->data = e;
pd->genpd.name = e->name;
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index 8a707077914c..63cd29f6d4d2 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o
obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o
obj-$(CONFIG_SOC_IMX8M) += imx8m-blk-ctrl.o
+obj-$(CONFIG_SOC_IMX8M) += imx8mp-blk-ctrl.o
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 3cb123016b3e..85aa86e1338a 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -21,10 +21,12 @@
#include <dt-bindings/power/imx8mq-power.h>
#include <dt-bindings/power/imx8mm-power.h>
#include <dt-bindings/power/imx8mn-power.h>
+#include <dt-bindings/power/imx8mp-power.h>
#define GPC_LPCR_A_CORE_BSC 0x000
#define GPC_PGC_CPU_MAPPING 0x0ec
+#define IMX8MP_GPC_PGC_CPU_MAPPING 0x1cc
#define IMX7_USB_HSIC_PHY_A_CORE_DOMAIN BIT(6)
#define IMX7_USB_OTG2_PHY_A_CORE_DOMAIN BIT(5)
@@ -65,6 +67,29 @@
#define IMX8MN_OTG1_A53_DOMAIN BIT(4)
#define IMX8MN_MIPI_A53_DOMAIN BIT(2)
+#define IMX8MP_MEDIA_ISPDWP_A53_DOMAIN BIT(20)
+#define IMX8MP_HSIOMIX_A53_DOMAIN BIT(19)
+#define IMX8MP_MIPI_PHY2_A53_DOMAIN BIT(18)
+#define IMX8MP_HDMI_PHY_A53_DOMAIN BIT(17)
+#define IMX8MP_HDMIMIX_A53_DOMAIN BIT(16)
+#define IMX8MP_VPU_VC8000E_A53_DOMAIN BIT(15)
+#define IMX8MP_VPU_G2_A53_DOMAIN BIT(14)
+#define IMX8MP_VPU_G1_A53_DOMAIN BIT(13)
+#define IMX8MP_MEDIAMIX_A53_DOMAIN BIT(12)
+#define IMX8MP_GPU3D_A53_DOMAIN BIT(11)
+#define IMX8MP_VPUMIX_A53_DOMAIN BIT(10)
+#define IMX8MP_GPUMIX_A53_DOMAIN BIT(9)
+#define IMX8MP_GPU2D_A53_DOMAIN BIT(8)
+#define IMX8MP_AUDIOMIX_A53_DOMAIN BIT(7)
+#define IMX8MP_MLMIX_A53_DOMAIN BIT(6)
+#define IMX8MP_USB2_PHY_A53_DOMAIN BIT(5)
+#define IMX8MP_USB1_PHY_A53_DOMAIN BIT(4)
+#define IMX8MP_PCIE_PHY_A53_DOMAIN BIT(3)
+#define IMX8MP_MIPI_PHY1_A53_DOMAIN BIT(2)
+
+#define IMX8MP_GPC_PU_PGC_SW_PUP_REQ 0x0d8
+#define IMX8MP_GPC_PU_PGC_SW_PDN_REQ 0x0e4
+
#define GPC_PU_PGC_SW_PUP_REQ 0x0f8
#define GPC_PU_PGC_SW_PDN_REQ 0x104
@@ -107,8 +132,30 @@
#define IMX8MN_OTG1_SW_Pxx_REQ BIT(2)
#define IMX8MN_MIPI_SW_Pxx_REQ BIT(0)
+#define IMX8MP_DDRMIX_Pxx_REQ BIT(19)
+#define IMX8MP_MEDIA_ISP_DWP_Pxx_REQ BIT(18)
+#define IMX8MP_HSIOMIX_Pxx_REQ BIT(17)
+#define IMX8MP_MIPI_PHY2_Pxx_REQ BIT(16)
+#define IMX8MP_HDMI_PHY_Pxx_REQ BIT(15)
+#define IMX8MP_HDMIMIX_Pxx_REQ BIT(14)
+#define IMX8MP_VPU_VC8K_Pxx_REQ BIT(13)
+#define IMX8MP_VPU_G2_Pxx_REQ BIT(12)
+#define IMX8MP_VPU_G1_Pxx_REQ BIT(11)
+#define IMX8MP_MEDIMIX_Pxx_REQ BIT(10)
+#define IMX8MP_GPU_3D_Pxx_REQ BIT(9)
+#define IMX8MP_VPU_MIX_SHARE_LOGIC_Pxx_REQ BIT(8)
+#define IMX8MP_GPU_SHARE_LOGIC_Pxx_REQ BIT(7)
+#define IMX8MP_GPU_2D_Pxx_REQ BIT(6)
+#define IMX8MP_AUDIOMIX_Pxx_REQ BIT(5)
+#define IMX8MP_MLMIX_Pxx_REQ BIT(4)
+#define IMX8MP_USB2_PHY_Pxx_REQ BIT(3)
+#define IMX8MP_USB1_PHY_Pxx_REQ BIT(2)
+#define IMX8MP_PCIE_PHY_SW_Pxx_REQ BIT(1)
+#define IMX8MP_MIPI_PHY1_SW_Pxx_REQ BIT(0)
+
#define GPC_M4_PU_PDN_FLG 0x1bc
+#define IMX8MP_GPC_PU_PWRHSK 0x190
#define GPC_PU_PWRHSK 0x1fc
#define IMX8M_GPU_HSK_PWRDNACKN BIT(26)
@@ -118,7 +165,6 @@
#define IMX8M_VPU_HSK_PWRDNREQN BIT(5)
#define IMX8M_DISP_HSK_PWRDNREQN BIT(4)
-
#define IMX8MM_GPUMIX_HSK_PWRDNACKN BIT(29)
#define IMX8MM_GPU_HSK_PWRDNACKN (BIT(27) | BIT(28))
#define IMX8MM_VPUMIX_HSK_PWRDNACKN BIT(26)
@@ -137,6 +183,21 @@
#define IMX8MN_DISPMIX_HSK_PWRDNREQN BIT(7)
#define IMX8MN_HSIO_HSK_PWRDNREQN BIT(5)
+#define IMX8MP_MEDIAMIX_PWRDNACKN BIT(30)
+#define IMX8MP_HDMIMIX_PWRDNACKN BIT(29)
+#define IMX8MP_HSIOMIX_PWRDNACKN BIT(28)
+#define IMX8MP_VPUMIX_PWRDNACKN BIT(26)
+#define IMX8MP_GPUMIX_PWRDNACKN BIT(25)
+#define IMX8MP_MLMIX_PWRDNACKN (BIT(23) | BIT(24))
+#define IMX8MP_AUDIOMIX_PWRDNACKN (BIT(20) | BIT(31))
+#define IMX8MP_MEDIAMIX_PWRDNREQN BIT(14)
+#define IMX8MP_HDMIMIX_PWRDNREQN BIT(13)
+#define IMX8MP_HSIOMIX_PWRDNREQN BIT(12)
+#define IMX8MP_VPUMIX_PWRDNREQN BIT(10)
+#define IMX8MP_GPUMIX_PWRDNREQN BIT(9)
+#define IMX8MP_MLMIX_PWRDNREQN (BIT(7) | BIT(8))
+#define IMX8MP_AUDIOMIX_PWRDNREQN (BIT(4) | BIT(15))
+
/*
* The PGC offset values in Reference Manual
* (Rev. 1, 01/2018 and the older ones) GPC chapter's
@@ -179,14 +240,44 @@
#define IMX8MN_PGC_GPUMIX 23
#define IMX8MN_PGC_DISPMIX 26
+#define IMX8MP_PGC_NOC 9
+#define IMX8MP_PGC_MIPI1 12
+#define IMX8MP_PGC_PCIE 13
+#define IMX8MP_PGC_USB1 14
+#define IMX8MP_PGC_USB2 15
+#define IMX8MP_PGC_MLMIX 16
+#define IMX8MP_PGC_AUDIOMIX 17
+#define IMX8MP_PGC_GPU2D 18
+#define IMX8MP_PGC_GPUMIX 19
+#define IMX8MP_PGC_VPUMIX 20
+#define IMX8MP_PGC_GPU3D 21
+#define IMX8MP_PGC_MEDIAMIX 22
+#define IMX8MP_PGC_VPU_G1 23
+#define IMX8MP_PGC_VPU_G2 24
+#define IMX8MP_PGC_VPU_VC8000E 25
+#define IMX8MP_PGC_HDMIMIX 26
+#define IMX8MP_PGC_HDMI 27
+#define IMX8MP_PGC_MIPI2 28
+#define IMX8MP_PGC_HSIOMIX 29
+#define IMX8MP_PGC_MEDIA_ISP_DWP 30
+#define IMX8MP_PGC_DDRMIX 31
+
#define GPC_PGC_CTRL(n) (0x800 + (n) * 0x40)
#define GPC_PGC_SR(n) (GPC_PGC_CTRL(n) + 0xc)
#define GPC_PGC_CTRL_PCR BIT(0)
+struct imx_pgc_regs {
+ u16 map;
+ u16 pup;
+ u16 pdn;
+ u16 hsk;
+};
+
struct imx_pgc_domain {
struct generic_pm_domain genpd;
struct regmap *regmap;
+ const struct imx_pgc_regs *regs;
struct regulator *regulator;
struct reset_control *reset;
struct clk_bulk_data *clks;
@@ -204,12 +295,16 @@ struct imx_pgc_domain {
const int voltage;
const bool keep_clocks;
struct device *dev;
+
+ unsigned int pgc_sw_pup_reg;
+ unsigned int pgc_sw_pdn_reg;
};
struct imx_pgc_domain_data {
const struct imx_pgc_domain *domains;
size_t domains_num;
const struct regmap_access_table *reg_access_table;
+ const struct imx_pgc_regs *pgc_regs;
};
static inline struct imx_pgc_domain *
@@ -249,14 +344,14 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
if (domain->bits.pxx) {
/* request the domain to power up */
- regmap_update_bits(domain->regmap, GPC_PU_PGC_SW_PUP_REQ,
+ regmap_update_bits(domain->regmap, domain->regs->pup,
domain->bits.pxx, domain->bits.pxx);
/*
* As per "5.5.9.4 Example Code 4" in IMX7DRM.pdf wait
* for PUP_REQ/PDN_REQ bit to be cleared
*/
ret = regmap_read_poll_timeout(domain->regmap,
- GPC_PU_PGC_SW_PUP_REQ, reg_val,
+ domain->regs->pup, reg_val,
!(reg_val & domain->bits.pxx),
0, USEC_PER_MSEC);
if (ret) {
@@ -278,11 +373,11 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
/* request the ADB400 to power up */
if (domain->bits.hskreq) {
- regmap_update_bits(domain->regmap, GPC_PU_PWRHSK,
+ regmap_update_bits(domain->regmap, domain->regs->hsk,
domain->bits.hskreq, domain->bits.hskreq);
/*
- * ret = regmap_read_poll_timeout(domain->regmap, GPC_PU_PWRHSK, reg_val,
+ * ret = regmap_read_poll_timeout(domain->regmap, domain->regs->hsk, reg_val,
* (reg_val & domain->bits.hskack), 0,
* USEC_PER_MSEC);
* Technically we need the commented code to wait handshake. But that needs
@@ -329,10 +424,10 @@ static int imx_pgc_power_down(struct generic_pm_domain *genpd)
/* request the ADB400 to power down */
if (domain->bits.hskreq) {
- regmap_clear_bits(domain->regmap, GPC_PU_PWRHSK,
+ regmap_clear_bits(domain->regmap, domain->regs->hsk,
domain->bits.hskreq);
- ret = regmap_read_poll_timeout(domain->regmap, GPC_PU_PWRHSK,
+ ret = regmap_read_poll_timeout(domain->regmap, domain->regs->hsk,
reg_val,
!(reg_val & domain->bits.hskack),
0, USEC_PER_MSEC);
@@ -350,14 +445,14 @@ static int imx_pgc_power_down(struct generic_pm_domain *genpd)
}
/* request the domain to power down */
- regmap_update_bits(domain->regmap, GPC_PU_PGC_SW_PDN_REQ,
+ regmap_update_bits(domain->regmap, domain->regs->pdn,
domain->bits.pxx, domain->bits.pxx);
/*
* As per "5.5.9.4 Example Code 4" in IMX7DRM.pdf wait
* for PUP_REQ/PDN_REQ bit to be cleared
*/
ret = regmap_read_poll_timeout(domain->regmap,
- GPC_PU_PGC_SW_PDN_REQ, reg_val,
+ domain->regs->pdn, reg_val,
!(reg_val & domain->bits.pxx),
0, USEC_PER_MSEC);
if (ret) {
@@ -442,10 +537,18 @@ static const struct regmap_access_table imx7_access_table = {
.n_yes_ranges = ARRAY_SIZE(imx7_yes_ranges),
};
+static const struct imx_pgc_regs imx7_pgc_regs = {
+ .map = GPC_PGC_CPU_MAPPING,
+ .pup = GPC_PU_PGC_SW_PUP_REQ,
+ .pdn = GPC_PU_PGC_SW_PDN_REQ,
+ .hsk = GPC_PU_PWRHSK,
+};
+
static const struct imx_pgc_domain_data imx7_pgc_domain_data = {
.domains = imx7_pgc_domains,
.domains_num = ARRAY_SIZE(imx7_pgc_domains),
.reg_access_table = &imx7_access_table,
+ .pgc_regs = &imx7_pgc_regs,
};
static const struct imx_pgc_domain imx8m_pgc_domains[] = {
@@ -614,6 +717,7 @@ static const struct imx_pgc_domain_data imx8m_pgc_domain_data = {
.domains = imx8m_pgc_domains,
.domains_num = ARRAY_SIZE(imx8m_pgc_domains),
.reg_access_table = &imx8m_access_table,
+ .pgc_regs = &imx7_pgc_regs,
};
static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
@@ -804,6 +908,304 @@ static const struct imx_pgc_domain_data imx8mm_pgc_domain_data = {
.domains = imx8mm_pgc_domains,
.domains_num = ARRAY_SIZE(imx8mm_pgc_domains),
.reg_access_table = &imx8mm_access_table,
+ .pgc_regs = &imx7_pgc_regs,
+};
+
+static const struct imx_pgc_domain imx8mp_pgc_domains[] = {
+ [IMX8MP_POWER_DOMAIN_MIPI_PHY1] = {
+ .genpd = {
+ .name = "mipi-phy1",
+ },
+ .bits = {
+ .pxx = IMX8MP_MIPI_PHY1_SW_Pxx_REQ,
+ .map = IMX8MP_MIPI_PHY1_A53_DOMAIN,
+ },
+ .pgc = BIT(IMX8MP_PGC_MIPI1),
+ },
+
+ [IMX8MP_POWER_DOMAIN_PCIE_PHY] = {
+ .genpd = {
+ .name = "pcie-phy1",
+ },
+ .bits = {
+ .pxx = IMX8MP_PCIE_PHY_SW_Pxx_REQ,
+ .map = IMX8MP_PCIE_PHY_A53_DOMAIN,
+ },
+ .pgc = BIT(IMX8MP_PGC_PCIE),
+ },
+
+ [IMX8MP_POWER_DOMAIN_USB1_PHY] = {
+ .genpd = {
+ .name = "usb-otg1",
+ },
+ .bits = {
+ .pxx = IMX8MP_USB1_PHY_Pxx_REQ,
+ .map = IMX8MP_USB1_PHY_A53_DOMAIN,
+ },
+ .pgc = BIT(IMX8MP_PGC_USB1),
+ },
+
+ [IMX8MP_POWER_DOMAIN_USB2_PHY] = {
+ .genpd = {
+ .name = "usb-otg2",
+ },
+ .bits = {
+ .pxx = IMX8MP_USB2_PHY_Pxx_REQ,
+ .map = IMX8MP_USB2_PHY_A53_DOMAIN,
+ },
+ .pgc = BIT(IMX8MP_PGC_USB2),
+ },
+
+ [IMX8MP_POWER_DOMAIN_MLMIX] = {
+ .genpd = {
+ .name = "mlmix",
+ },
+ .bits = {
+ .pxx = IMX8MP_MLMIX_Pxx_REQ,
+ .map = IMX8MP_MLMIX_A53_DOMAIN,
+ .hskreq = IMX8MP_MLMIX_PWRDNREQN,
+ .hskack = IMX8MP_MLMIX_PWRDNACKN,
+ },
+ .pgc = BIT(IMX8MP_PGC_MLMIX),
+ .keep_clocks = true,
+ },
+
+ [IMX8MP_POWER_DOMAIN_AUDIOMIX] = {
+ .genpd = {
+ .name = "audiomix",
+ },
+ .bits = {
+ .pxx = IMX8MP_AUDIOMIX_Pxx_REQ,
+ .map = IMX8MP_AUDIOMIX_A53_DOMAIN,
+ .hskreq = IMX8MP_AUDIOMIX_PWRDNREQN,
+ .hskack = IMX8MP_AUDIOMIX_PWRDNACKN,
+ },
+ .pgc = BIT(IMX8MP_PGC_AUDIOMIX),
+ .keep_clocks = true,
+ },
+
+ [IMX8MP_POWER_DOMAIN_GPU2D] = {
+ .genpd = {
+ .name = "gpu2d",
+ },
+ .bits = {
+ .pxx = IMX8MP_GPU_2D_Pxx_REQ,
+ .map = IMX8MP_GPU2D_A53_DOMAIN,
+ },
+ .pgc = BIT(IMX8MP_PGC_GPU2D),
+ },
+
+ [IMX8MP_POWER_DOMAIN_GPUMIX] = {
+ .genpd = {
+ .name = "gpumix",
+ },
+ .bits = {
+ .pxx = IMX8MP_GPU_SHARE_LOGIC_Pxx_REQ,
+ .map = IMX8MP_GPUMIX_A53_DOMAIN,
+ .hskreq = IMX8MP_GPUMIX_PWRDNREQN,
+ .hskack = IMX8MP_GPUMIX_PWRDNACKN,
+ },
+ .pgc = BIT(IMX8MP_PGC_GPUMIX),
+ .keep_clocks = true,
+ },
+
+ [IMX8MP_POWER_DOMAIN_VPUMIX] = {
+ .genpd = {
+ .name = "vpumix",
+ },
+ .bits = {
+ .pxx = IMX8MP_VPU_MIX_SHARE_LOGIC_Pxx_REQ,
+ .map = IMX8MP_VPUMIX_A53_DOMAIN,
+ .hskreq = IMX8MP_VPUMIX_PWRDNREQN,
+ .hskack = IMX8MP_VPUMIX_PWRDNACKN,
+ },
+ .pgc = BIT(IMX8MP_PGC_VPUMIX),
+ .keep_clocks = true,
+ },
+
+ [IMX8MP_POWER_DOMAIN_GPU3D] = {
+ .genpd = {
+ .name = "gpu3d",
+ },
+ .bits = {
+ .pxx = IMX8MP_GPU_3D_Pxx_REQ,
+ .map = IMX8MP_GPU3D_A53_DOMAIN,
+ },
+ .pgc = BIT(IMX8MP_PGC_GPU3D),
+ },
+
+ [IMX8MP_POWER_DOMAIN_MEDIAMIX] = {
+ .genpd = {
+ .name = "mediamix",
+ },
+ .bits = {
+ .pxx = IMX8MP_MEDIMIX_Pxx_REQ,
+ .map = IMX8MP_MEDIAMIX_A53_DOMAIN,
+ .hskreq = IMX8MP_MEDIAMIX_PWRDNREQN,
+ .hskack = IMX8MP_MEDIAMIX_PWRDNACKN,
+ },
+ .pgc = BIT(IMX8MP_PGC_MEDIAMIX),
+ .keep_clocks = true,
+ },
+
+ [IMX8MP_POWER_DOMAIN_VPU_G1] = {
+ .genpd = {
+ .name = "vpu-g1",
+ },
+ .bits = {
+ .pxx = IMX8MP_VPU_G1_Pxx_REQ,
+ .map = IMX8MP_VPU_G1_A53_DOMAIN,
+ },
+ .pgc = BIT(IMX8MP_PGC_VPU_G1),
+ },
+
+ [IMX8MP_POWER_DOMAIN_VPU_G2] = {
+ .genpd = {
+ .name = "vpu-g2",
+ },
+ .bits = {
+ .pxx = IMX8MP_VPU_G2_Pxx_REQ,
+ .map = IMX8MP_VPU_G2_A53_DOMAIN
+ },
+ .pgc = BIT(IMX8MP_PGC_VPU_G2),
+ },
+
+ [IMX8MP_POWER_DOMAIN_VPU_VC8000E] = {
+ .genpd = {
+ .name = "vpu-h1",
+ },
+ .bits = {
+ .pxx = IMX8MP_VPU_VC8K_Pxx_REQ,
+ .map = IMX8MP_VPU_VC8000E_A53_DOMAIN,
+ },
+ .pgc = BIT(IMX8MP_PGC_VPU_VC8000E),
+ },
+
+ [IMX8MP_POWER_DOMAIN_HDMIMIX] = {
+ .genpd = {
+ .name = "hdmimix",
+ },
+ .bits = {
+ .pxx = IMX8MP_HDMIMIX_Pxx_REQ,
+ .map = IMX8MP_HDMIMIX_A53_DOMAIN,
+ .hskreq = IMX8MP_HDMIMIX_PWRDNREQN,
+ .hskack = IMX8MP_HDMIMIX_PWRDNACKN,
+ },
+ .pgc = BIT(IMX8MP_PGC_HDMIMIX),
+ .keep_clocks = true,
+ },
+
+ [IMX8MP_POWER_DOMAIN_HDMI_PHY] = {
+ .genpd = {
+ .name = "hdmi-phy",
+ },
+ .bits = {
+ .pxx = IMX8MP_HDMI_PHY_Pxx_REQ,
+ .map = IMX8MP_HDMI_PHY_A53_DOMAIN,
+ },
+ .pgc = BIT(IMX8MP_PGC_HDMI),
+ },
+
+ [IMX8MP_POWER_DOMAIN_MIPI_PHY2] = {
+ .genpd = {
+ .name = "mipi-phy2",
+ },
+ .bits = {
+ .pxx = IMX8MP_MIPI_PHY2_Pxx_REQ,
+ .map = IMX8MP_MIPI_PHY2_A53_DOMAIN,
+ },
+ .pgc = BIT(IMX8MP_PGC_MIPI2),
+ },
+
+ [IMX8MP_POWER_DOMAIN_HSIOMIX] = {
+ .genpd = {
+ .name = "hsiomix",
+ },
+ .bits = {
+ .pxx = IMX8MP_HSIOMIX_Pxx_REQ,
+ .map = IMX8MP_HSIOMIX_A53_DOMAIN,
+ .hskreq = IMX8MP_HSIOMIX_PWRDNREQN,
+ .hskack = IMX8MP_HSIOMIX_PWRDNACKN,
+ },
+ .pgc = BIT(IMX8MP_PGC_HSIOMIX),
+ .keep_clocks = true,
+ },
+
+ [IMX8MP_POWER_DOMAIN_MEDIAMIX_ISPDWP] = {
+ .genpd = {
+ .name = "mediamix-isp-dwp",
+ },
+ .bits = {
+ .pxx = IMX8MP_MEDIA_ISP_DWP_Pxx_REQ,
+ .map = IMX8MP_MEDIA_ISPDWP_A53_DOMAIN,
+ },
+ .pgc = BIT(IMX8MP_PGC_MEDIA_ISP_DWP),
+ },
+};
+
+static const struct regmap_range imx8mp_yes_ranges[] = {
+ regmap_reg_range(GPC_LPCR_A_CORE_BSC,
+ IMX8MP_GPC_PGC_CPU_MAPPING),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_NOC),
+ GPC_PGC_SR(IMX8MP_PGC_NOC)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_MIPI1),
+ GPC_PGC_SR(IMX8MP_PGC_MIPI1)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_PCIE),
+ GPC_PGC_SR(IMX8MP_PGC_PCIE)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_USB1),
+ GPC_PGC_SR(IMX8MP_PGC_USB1)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_USB2),
+ GPC_PGC_SR(IMX8MP_PGC_USB2)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_MLMIX),
+ GPC_PGC_SR(IMX8MP_PGC_MLMIX)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_AUDIOMIX),
+ GPC_PGC_SR(IMX8MP_PGC_AUDIOMIX)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_GPU2D),
+ GPC_PGC_SR(IMX8MP_PGC_GPU2D)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_GPUMIX),
+ GPC_PGC_SR(IMX8MP_PGC_GPUMIX)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_VPUMIX),
+ GPC_PGC_SR(IMX8MP_PGC_VPUMIX)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_GPU3D),
+ GPC_PGC_SR(IMX8MP_PGC_GPU3D)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_MEDIAMIX),
+ GPC_PGC_SR(IMX8MP_PGC_MEDIAMIX)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_VPU_G1),
+ GPC_PGC_SR(IMX8MP_PGC_VPU_G1)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_VPU_G2),
+ GPC_PGC_SR(IMX8MP_PGC_VPU_G2)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_VPU_VC8000E),
+ GPC_PGC_SR(IMX8MP_PGC_VPU_VC8000E)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_HDMIMIX),
+ GPC_PGC_SR(IMX8MP_PGC_HDMIMIX)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_HDMI),
+ GPC_PGC_SR(IMX8MP_PGC_HDMI)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_MIPI2),
+ GPC_PGC_SR(IMX8MP_PGC_MIPI2)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_HSIOMIX),
+ GPC_PGC_SR(IMX8MP_PGC_HSIOMIX)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_MEDIA_ISP_DWP),
+ GPC_PGC_SR(IMX8MP_PGC_MEDIA_ISP_DWP)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_DDRMIX),
+ GPC_PGC_SR(IMX8MP_PGC_DDRMIX)),
+};
+
+static const struct regmap_access_table imx8mp_access_table = {
+ .yes_ranges = imx8mp_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(imx8mp_yes_ranges),
+};
+
+static const struct imx_pgc_regs imx8mp_pgc_regs = {
+ .map = IMX8MP_GPC_PGC_CPU_MAPPING,
+ .pup = IMX8MP_GPC_PU_PGC_SW_PUP_REQ,
+ .pdn = IMX8MP_GPC_PU_PGC_SW_PDN_REQ,
+ .hsk = IMX8MP_GPC_PU_PWRHSK,
+};
+static const struct imx_pgc_domain_data imx8mp_pgc_domain_data = {
+ .domains = imx8mp_pgc_domains,
+ .domains_num = ARRAY_SIZE(imx8mp_pgc_domains),
+ .reg_access_table = &imx8mp_access_table,
+ .pgc_regs = &imx8mp_pgc_regs,
};
static const struct imx_pgc_domain imx8mn_pgc_domains[] = {
@@ -895,6 +1297,7 @@ static const struct imx_pgc_domain_data imx8mn_pgc_domain_data = {
.domains = imx8mn_pgc_domains,
.domains_num = ARRAY_SIZE(imx8mn_pgc_domains),
.reg_access_table = &imx8mn_access_table,
+ .pgc_regs = &imx7_pgc_regs,
};
static int imx_pgc_domain_probe(struct platform_device *pdev)
@@ -927,7 +1330,7 @@ static int imx_pgc_domain_probe(struct platform_device *pdev)
pm_runtime_enable(domain->dev);
if (domain->bits.map)
- regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
+ regmap_update_bits(domain->regmap, domain->regs->map,
domain->bits.map, domain->bits.map);
ret = pm_genpd_init(&domain->genpd, NULL, true);
@@ -953,7 +1356,7 @@ out_genpd_remove:
pm_genpd_remove(&domain->genpd);
out_domain_unmap:
if (domain->bits.map)
- regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
+ regmap_update_bits(domain->regmap, domain->regs->map,
domain->bits.map, 0);
pm_runtime_disable(domain->dev);
@@ -968,7 +1371,7 @@ static int imx_pgc_domain_remove(struct platform_device *pdev)
pm_genpd_remove(&domain->genpd);
if (domain->bits.map)
- regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
+ regmap_update_bits(domain->regmap, domain->regs->map,
domain->bits.map, 0);
pm_runtime_disable(domain->dev);
@@ -1099,6 +1502,8 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
domain = pd_pdev->dev.platform_data;
domain->regmap = regmap;
+ domain->regs = domain_data->pgc_regs;
+
domain->genpd.power_on = imx_pgc_power_up;
domain->genpd.power_off = imx_pgc_power_down;
@@ -1120,6 +1525,7 @@ static const struct of_device_id imx_gpcv2_dt_ids[] = {
{ .compatible = "fsl,imx7d-gpc", .data = &imx7_pgc_domain_data, },
{ .compatible = "fsl,imx8mm-gpc", .data = &imx8mm_pgc_domain_data, },
{ .compatible = "fsl,imx8mn-gpc", .data = &imx8mn_pgc_domain_data, },
+ { .compatible = "fsl,imx8mp-gpc", .data = &imx8mp_pgc_domain_data, },
{ .compatible = "fsl,imx8mq-gpc", .data = &imx8m_pgc_domain_data, },
{ }
};
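
From a peripheral driver's point of view the new i.MX8MP domains behave like any other genpd: the domain is attached from the device's power-domains DT property and driven through runtime PM. A minimal sketch under that assumption, for a device sitting in a single domain:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_probe(struct platform_device *pdev)
{
	int ret;

	/* the genpd was attached from the power-domains property at probe */
	ret = devm_pm_runtime_enable(&pdev->dev);
	if (ret)
		return ret;

	/* triggers imx_pgc_power_up() for the domain if it was off */
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0)
		return ret;

	/* ... set up the peripheral while the domain is powered ... */

	/* drops the last reference; genpd may now power the domain down */
	pm_runtime_put(&pdev->dev);
	return 0;
}

Devices spanning several domains instead attach each one explicitly with dev_pm_domain_attach_by_name(), as the blk-ctrl drivers below do.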
diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c
index ccd0577a771e..7f49385ed2f8 100644
--- a/drivers/soc/imx/imx8m-blk-ctrl.c
+++ b/drivers/soc/imx/imx8m-blk-ctrl.c
@@ -15,11 +15,12 @@
#include <dt-bindings/power/imx8mm-power.h>
#include <dt-bindings/power/imx8mn-power.h>
+#include <dt-bindings/power/imx8mp-power.h>
#include <dt-bindings/power/imx8mq-power.h>
#define BLK_SFT_RSTN 0x0
#define BLK_CLK_EN 0x4
-#define BLK_MIPI_RESET_DIV 0x8 /* Mini/Nano DISPLAY_BLK_CTRL only */
+#define BLK_MIPI_RESET_DIV 0x8 /* Mini/Nano/Plus DISPLAY_BLK_CTRL only */
struct imx8m_blk_ctrl_domain;
@@ -41,7 +42,7 @@ struct imx8m_blk_ctrl_domain_data {
u32 clk_mask;
/*
- * i.MX8M Mini and Nano have a third DISPLAY_BLK_CTRL register
+ * i.MX8M Mini, Nano and Plus have a third DISPLAY_BLK_CTRL register
* which is used to control the reset for the MIPI Phy.
* Since it's only present in certain circumstances,
* an if-statement should be used before setting and clearing this
@@ -241,6 +242,7 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev)
ret = PTR_ERR(domain->power_dev);
goto cleanup_pds;
}
+ dev_set_name(domain->power_dev, "%s", data->name);
domain->genpd.name = data->name;
domain->genpd.power_on = imx8m_blk_ctrl_power_on;
@@ -590,6 +592,121 @@ static const struct imx8m_blk_ctrl_data imx8mn_disp_blk_ctl_dev_data = {
.num_domains = ARRAY_SIZE(imx8mn_disp_blk_ctl_domain_data),
};
+static int imx8mp_media_power_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct imx8m_blk_ctrl *bc = container_of(nb, struct imx8m_blk_ctrl,
+ power_nb);
+
+ if (action != GENPD_NOTIFY_ON && action != GENPD_NOTIFY_PRE_OFF)
+ return NOTIFY_OK;
+
+ /* Enable bus clock and deassert bus reset */
+ regmap_set_bits(bc->regmap, BLK_CLK_EN, BIT(8));
+ regmap_set_bits(bc->regmap, BLK_SFT_RSTN, BIT(8));
+
+ /*
+ * On power up we have no software backchannel to the GPC to
+ * wait for the ADB handshake to happen, so we just delay for a
+ * bit. On power down the GPC driver waits for the handshake.
+ */
+ if (action == GENPD_NOTIFY_ON)
+ udelay(5);
+
+ return NOTIFY_OK;
+}
+
+/*
+ * From i.MX 8M Plus Applications Processor Reference Manual, Rev. 1,
+ * sections 13.2.2 and 13.2.3. Note that isp-ahb and dwe are not
+ * shown in Figure 13-5 (Media BLK_CTRL Clocks).
+ */
+static const struct imx8m_blk_ctrl_domain_data imx8mp_media_blk_ctl_domain_data[] = {
+ [IMX8MP_MEDIABLK_PD_MIPI_DSI_1] = {
+ .name = "mediablk-mipi-dsi-1",
+ .clk_names = (const char *[]){ "apb", "phy", },
+ .num_clks = 2,
+ .gpc_name = "mipi-dsi1",
+ .rst_mask = BIT(0) | BIT(1),
+ .clk_mask = BIT(0) | BIT(1),
+ .mipi_phy_rst_mask = BIT(17),
+ },
+ [IMX8MP_MEDIABLK_PD_MIPI_CSI2_1] = {
+ .name = "mediablk-mipi-csi2-1",
+ .clk_names = (const char *[]){ "apb", "cam1" },
+ .num_clks = 2,
+ .gpc_name = "mipi-csi1",
+ .rst_mask = BIT(2) | BIT(3),
+ .clk_mask = BIT(2) | BIT(3),
+ .mipi_phy_rst_mask = BIT(16),
+ },
+ [IMX8MP_MEDIABLK_PD_LCDIF_1] = {
+ .name = "mediablk-lcdif-1",
+ .clk_names = (const char *[]){ "disp1", "apb", "axi", },
+ .num_clks = 3,
+ .gpc_name = "lcdif1",
+ .rst_mask = BIT(4) | BIT(5) | BIT(23),
+ .clk_mask = BIT(4) | BIT(5) | BIT(23),
+ },
+ [IMX8MP_MEDIABLK_PD_ISI] = {
+ .name = "mediablk-isi",
+ .clk_names = (const char *[]){ "axi", "apb" },
+ .num_clks = 2,
+ .gpc_name = "isi",
+ .rst_mask = BIT(6) | BIT(7),
+ .clk_mask = BIT(6) | BIT(7),
+ },
+ [IMX8MP_MEDIABLK_PD_MIPI_CSI2_2] = {
+ .name = "mediablk-mipi-csi2-2",
+ .clk_names = (const char *[]){ "apb", "cam2" },
+ .num_clks = 2,
+ .gpc_name = "mipi-csi2",
+ .rst_mask = BIT(9) | BIT(10),
+ .clk_mask = BIT(9) | BIT(10),
+ .mipi_phy_rst_mask = BIT(30),
+ },
+ [IMX8MP_MEDIABLK_PD_LCDIF_2] = {
+ .name = "mediablk-lcdif-2",
+ .clk_names = (const char *[]){ "disp1", "apb", "axi", },
+ .num_clks = 3,
+ .gpc_name = "lcdif2",
+ .rst_mask = BIT(11) | BIT(12) | BIT(24),
+ .clk_mask = BIT(11) | BIT(12) | BIT(24),
+ },
+ [IMX8MP_MEDIABLK_PD_ISP] = {
+ .name = "mediablk-isp",
+ .clk_names = (const char *[]){ "isp", "axi", "apb" },
+ .num_clks = 3,
+ .gpc_name = "isp",
+ .rst_mask = BIT(16) | BIT(17) | BIT(18),
+ .clk_mask = BIT(16) | BIT(17) | BIT(18),
+ },
+ [IMX8MP_MEDIABLK_PD_DWE] = {
+ .name = "mediablk-dwe",
+ .clk_names = (const char *[]){ "axi", "apb" },
+ .num_clks = 2,
+ .gpc_name = "dwe",
+ .rst_mask = BIT(19) | BIT(20) | BIT(21),
+ .clk_mask = BIT(19) | BIT(20) | BIT(21),
+ },
+ [IMX8MP_MEDIABLK_PD_MIPI_DSI_2] = {
+ .name = "mediablk-mipi-dsi-2",
+ .clk_names = (const char *[]){ "phy", },
+ .num_clks = 1,
+ .gpc_name = "mipi-dsi2",
+ .rst_mask = BIT(22),
+ .clk_mask = BIT(22),
+ .mipi_phy_rst_mask = BIT(29),
+ },
+};
+
+static const struct imx8m_blk_ctrl_data imx8mp_media_blk_ctl_dev_data = {
+ .max_reg = 0x138,
+ .power_notifier_fn = imx8mp_media_power_notifier,
+ .domains = imx8mp_media_blk_ctl_domain_data,
+ .num_domains = ARRAY_SIZE(imx8mp_media_blk_ctl_domain_data),
+};
+
static int imx8mq_vpu_power_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -663,6 +780,9 @@ static const struct of_device_id imx8m_blk_ctrl_of_match[] = {
.compatible = "fsl,imx8mn-disp-blk-ctrl",
.data = &imx8mn_disp_blk_ctl_dev_data
}, {
+ .compatible = "fsl,imx8mp-media-blk-ctrl",
+ .data = &imx8mp_media_blk_ctl_dev_data
+ }, {
.compatible = "fsl,imx8mq-vpu-blk-ctrl",
.data = &imx8mq_vpu_blk_ctl_dev_data
}, {
diff --git a/drivers/soc/imx/imx8mp-blk-ctrl.c b/drivers/soc/imx/imx8mp-blk-ctrl.c
new file mode 100644
index 000000000000..4ca2ede6871b
--- /dev/null
+++ b/drivers/soc/imx/imx8mp-blk-ctrl.c
@@ -0,0 +1,696 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright 2022 Pengutronix, Lucas Stach <kernel@pengutronix.de>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/power/imx8mp-power.h>
+
+#define GPR_REG0 0x0
+#define PCIE_CLOCK_MODULE_EN BIT(0)
+#define USB_CLOCK_MODULE_EN BIT(1)
+
+struct imx8mp_blk_ctrl_domain;
+
+struct imx8mp_blk_ctrl {
+ struct device *dev;
+ struct notifier_block power_nb;
+ struct device *bus_power_dev;
+ struct regmap *regmap;
+ struct imx8mp_blk_ctrl_domain *domains;
+ struct genpd_onecell_data onecell_data;
+	void (*power_off)(struct imx8mp_blk_ctrl *bc, struct imx8mp_blk_ctrl_domain *domain);
+	void (*power_on)(struct imx8mp_blk_ctrl *bc, struct imx8mp_blk_ctrl_domain *domain);
+};
+
+struct imx8mp_blk_ctrl_domain_data {
+ const char *name;
+ const char * const *clk_names;
+ int num_clks;
+ const char *gpc_name;
+};
+
+#define DOMAIN_MAX_CLKS 2
+
+struct imx8mp_blk_ctrl_domain {
+ struct generic_pm_domain genpd;
+ const struct imx8mp_blk_ctrl_domain_data *data;
+ struct clk_bulk_data clks[DOMAIN_MAX_CLKS];
+ struct device *power_dev;
+ struct imx8mp_blk_ctrl *bc;
+ int id;
+};
+
+struct imx8mp_blk_ctrl_data {
+ int max_reg;
+ notifier_fn_t power_notifier_fn;
+	void (*power_off)(struct imx8mp_blk_ctrl *bc, struct imx8mp_blk_ctrl_domain *domain);
+	void (*power_on)(struct imx8mp_blk_ctrl *bc, struct imx8mp_blk_ctrl_domain *domain);
+ const struct imx8mp_blk_ctrl_domain_data *domains;
+ int num_domains;
+};
+
+static inline struct imx8mp_blk_ctrl_domain *
+to_imx8mp_blk_ctrl_domain(struct generic_pm_domain *genpd)
+{
+ return container_of(genpd, struct imx8mp_blk_ctrl_domain, genpd);
+}
+
+static void imx8mp_hsio_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc,
+ struct imx8mp_blk_ctrl_domain *domain)
+{
+ switch (domain->id) {
+ case IMX8MP_HSIOBLK_PD_USB:
+ regmap_set_bits(bc->regmap, GPR_REG0, USB_CLOCK_MODULE_EN);
+ break;
+ case IMX8MP_HSIOBLK_PD_PCIE:
+ regmap_set_bits(bc->regmap, GPR_REG0, PCIE_CLOCK_MODULE_EN);
+ break;
+ default:
+ break;
+ }
+}
+
+static void imx8mp_hsio_blk_ctrl_power_off(struct imx8mp_blk_ctrl *bc,
+ struct imx8mp_blk_ctrl_domain *domain)
+{
+ switch (domain->id) {
+ case IMX8MP_HSIOBLK_PD_USB:
+ regmap_clear_bits(bc->regmap, GPR_REG0, USB_CLOCK_MODULE_EN);
+ break;
+ case IMX8MP_HSIOBLK_PD_PCIE:
+ regmap_clear_bits(bc->regmap, GPR_REG0, PCIE_CLOCK_MODULE_EN);
+ break;
+ default:
+ break;
+ }
+}
+
+static int imx8mp_hsio_power_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct imx8mp_blk_ctrl *bc = container_of(nb, struct imx8mp_blk_ctrl,
+ power_nb);
+ struct clk_bulk_data *usb_clk = bc->domains[IMX8MP_HSIOBLK_PD_USB].clks;
+ int num_clks = bc->domains[IMX8MP_HSIOBLK_PD_USB].data->num_clks;
+ int ret;
+
+ switch (action) {
+ case GENPD_NOTIFY_ON:
+ /*
+ * enable USB clock for a moment for the power-on ADB handshake
+ * to proceed
+ */
+ ret = clk_bulk_prepare_enable(num_clks, usb_clk);
+ if (ret)
+ return NOTIFY_BAD;
+ regmap_set_bits(bc->regmap, GPR_REG0, USB_CLOCK_MODULE_EN);
+
+ udelay(5);
+
+ regmap_clear_bits(bc->regmap, GPR_REG0, USB_CLOCK_MODULE_EN);
+ clk_bulk_disable_unprepare(num_clks, usb_clk);
+ break;
+ case GENPD_NOTIFY_PRE_OFF:
+ /* enable USB clock for the power-down ADB handshake to work */
+ ret = clk_bulk_prepare_enable(num_clks, usb_clk);
+ if (ret)
+ return NOTIFY_BAD;
+
+ regmap_set_bits(bc->regmap, GPR_REG0, USB_CLOCK_MODULE_EN);
+ break;
+ case GENPD_NOTIFY_OFF:
+ clk_bulk_disable_unprepare(num_clks, usb_clk);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static const struct imx8mp_blk_ctrl_domain_data imx8mp_hsio_domain_data[] = {
+ [IMX8MP_HSIOBLK_PD_USB] = {
+ .name = "hsioblk-usb",
+ .clk_names = (const char *[]){ "usb" },
+ .num_clks = 1,
+ .gpc_name = "usb",
+ },
+ [IMX8MP_HSIOBLK_PD_USB_PHY1] = {
+ .name = "hsioblk-usb-phy1",
+ .gpc_name = "usb-phy1",
+ },
+ [IMX8MP_HSIOBLK_PD_USB_PHY2] = {
+ .name = "hsioblk-usb-phy2",
+ .gpc_name = "usb-phy2",
+ },
+ [IMX8MP_HSIOBLK_PD_PCIE] = {
+ .name = "hsioblk-pcie",
+ .clk_names = (const char *[]){ "pcie" },
+ .num_clks = 1,
+ .gpc_name = "pcie",
+ },
+ [IMX8MP_HSIOBLK_PD_PCIE_PHY] = {
+ .name = "hsioblk-pcie-phy",
+ .gpc_name = "pcie-phy",
+ },
+};
+
+static const struct imx8mp_blk_ctrl_data imx8mp_hsio_blk_ctl_dev_data = {
+ .max_reg = 0x24,
+ .power_on = imx8mp_hsio_blk_ctrl_power_on,
+ .power_off = imx8mp_hsio_blk_ctrl_power_off,
+ .power_notifier_fn = imx8mp_hsio_power_notifier,
+ .domains = imx8mp_hsio_domain_data,
+ .num_domains = ARRAY_SIZE(imx8mp_hsio_domain_data),
+};
+
+#define HDMI_RTX_RESET_CTL0 0x20
+#define HDMI_RTX_CLK_CTL0 0x40
+#define HDMI_RTX_CLK_CTL1 0x50
+#define HDMI_RTX_CLK_CTL2 0x60
+#define HDMI_RTX_CLK_CTL3 0x70
+#define HDMI_RTX_CLK_CTL4 0x80
+#define HDMI_TX_CONTROL0 0x200
+
+static void imx8mp_hdmi_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc,
+ struct imx8mp_blk_ctrl_domain *domain)
+{
+ switch (domain->id) {
+ case IMX8MP_HDMIBLK_PD_IRQSTEER:
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(9));
+ regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(16));
+ break;
+ case IMX8MP_HDMIBLK_PD_LCDIF:
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
+ BIT(7) | BIT(16) | BIT(17) | BIT(18) |
+ BIT(19) | BIT(20));
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(11));
+ regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0,
+ BIT(4) | BIT(5) | BIT(6));
+ break;
+ case IMX8MP_HDMIBLK_PD_PAI:
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(17));
+ regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(18));
+ break;
+ case IMX8MP_HDMIBLK_PD_PVI:
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(28));
+ regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(22));
+ break;
+ case IMX8MP_HDMIBLK_PD_TRNG:
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(27) | BIT(30));
+ regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(20));
+ break;
+ case IMX8MP_HDMIBLK_PD_HDMI_TX:
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
+ BIT(2) | BIT(4) | BIT(5));
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1,
+ BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
+ BIT(18) | BIT(19) | BIT(20) | BIT(21));
+ regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0,
+ BIT(7) | BIT(10) | BIT(11));
+ regmap_set_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(1));
+ break;
+ case IMX8MP_HDMIBLK_PD_HDMI_TX_PHY:
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(22) | BIT(24));
+ regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(12));
+ regmap_clear_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(3));
+ break;
+ default:
+ break;
+ }
+}
+
+static void imx8mp_hdmi_blk_ctrl_power_off(struct imx8mp_blk_ctrl *bc,
+ struct imx8mp_blk_ctrl_domain *domain)
+{
+ switch (domain->id) {
+ case IMX8MP_HDMIBLK_PD_IRQSTEER:
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(9));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(16));
+ break;
+ case IMX8MP_HDMIBLK_PD_LCDIF:
+ regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0,
+ BIT(4) | BIT(5) | BIT(6));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(11));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
+ BIT(7) | BIT(16) | BIT(17) | BIT(18) |
+ BIT(19) | BIT(20));
+ break;
+ case IMX8MP_HDMIBLK_PD_PAI:
+ regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(18));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(17));
+ break;
+ case IMX8MP_HDMIBLK_PD_PVI:
+ regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(22));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(28));
+ break;
+ case IMX8MP_HDMIBLK_PD_TRNG:
+ regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(20));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(27) | BIT(30));
+ break;
+ case IMX8MP_HDMIBLK_PD_HDMI_TX:
+ regmap_clear_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(1));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0,
+ BIT(7) | BIT(10) | BIT(11));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1,
+ BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
+ BIT(18) | BIT(19) | BIT(20) | BIT(21));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
+ BIT(2) | BIT(4) | BIT(5));
+ break;
+ case IMX8MP_HDMIBLK_PD_HDMI_TX_PHY:
+ regmap_set_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(3));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(12));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(22) | BIT(24));
+ break;
+ default:
+ break;
+ }
+}
+
+static int imx8mp_hdmi_power_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct imx8mp_blk_ctrl *bc = container_of(nb, struct imx8mp_blk_ctrl,
+ power_nb);
+
+ if (action != GENPD_NOTIFY_ON)
+ return NOTIFY_OK;
+
+ /*
+	 * Contrary to other blk-ctrls, the reset and clock bits don't clear
+	 * when the power domain is powered down. To ensure proper reset
+	 * pulsing, first force them all into the asserted state, then enable
+	 * the bus clocks and finally release the ADB reset.
+ */
+ regmap_write(bc->regmap, HDMI_RTX_RESET_CTL0, 0x0);
+ regmap_write(bc->regmap, HDMI_RTX_CLK_CTL0, 0x0);
+ regmap_write(bc->regmap, HDMI_RTX_CLK_CTL1, 0x0);
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
+ BIT(0) | BIT(1) | BIT(10));
+ regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(0));
+
+ /*
+ * On power up we have no software backchannel to the GPC to
+ * wait for the ADB handshake to happen, so we just delay for a
+ * bit. On power down the GPC driver waits for the handshake.
+ */
+ udelay(5);
+
+ return NOTIFY_OK;
+}
+
+static const struct imx8mp_blk_ctrl_domain_data imx8mp_hdmi_domain_data[] = {
+ [IMX8MP_HDMIBLK_PD_IRQSTEER] = {
+ .name = "hdmiblk-irqsteer",
+ .clk_names = (const char *[]){ "apb" },
+ .num_clks = 1,
+ .gpc_name = "irqsteer",
+ },
+ [IMX8MP_HDMIBLK_PD_LCDIF] = {
+ .name = "hdmiblk-lcdif",
+ .clk_names = (const char *[]){ "axi", "apb" },
+ .num_clks = 2,
+ .gpc_name = "lcdif",
+ },
+ [IMX8MP_HDMIBLK_PD_PAI] = {
+ .name = "hdmiblk-pai",
+ .clk_names = (const char *[]){ "apb" },
+ .num_clks = 1,
+ .gpc_name = "pai",
+ },
+ [IMX8MP_HDMIBLK_PD_PVI] = {
+ .name = "hdmiblk-pvi",
+ .clk_names = (const char *[]){ "apb" },
+ .num_clks = 1,
+ .gpc_name = "pvi",
+ },
+ [IMX8MP_HDMIBLK_PD_TRNG] = {
+ .name = "hdmiblk-trng",
+ .clk_names = (const char *[]){ "apb" },
+ .num_clks = 1,
+ .gpc_name = "trng",
+ },
+ [IMX8MP_HDMIBLK_PD_HDMI_TX] = {
+ .name = "hdmiblk-hdmi-tx",
+ .clk_names = (const char *[]){ "apb", "ref_266m" },
+ .num_clks = 2,
+ .gpc_name = "hdmi-tx",
+ },
+ [IMX8MP_HDMIBLK_PD_HDMI_TX_PHY] = {
+ .name = "hdmiblk-hdmi-tx-phy",
+ .clk_names = (const char *[]){ "apb", "ref_24m" },
+ .num_clks = 2,
+ .gpc_name = "hdmi-tx-phy",
+ },
+};
+
+static const struct imx8mp_blk_ctrl_data imx8mp_hdmi_blk_ctl_dev_data = {
+ .max_reg = 0x23c,
+ .power_on = imx8mp_hdmi_blk_ctrl_power_on,
+ .power_off = imx8mp_hdmi_blk_ctrl_power_off,
+ .power_notifier_fn = imx8mp_hdmi_power_notifier,
+ .domains = imx8mp_hdmi_domain_data,
+ .num_domains = ARRAY_SIZE(imx8mp_hdmi_domain_data),
+};
+
+static int imx8mp_blk_ctrl_power_on(struct generic_pm_domain *genpd)
+{
+ struct imx8mp_blk_ctrl_domain *domain = to_imx8mp_blk_ctrl_domain(genpd);
+ const struct imx8mp_blk_ctrl_domain_data *data = domain->data;
+ struct imx8mp_blk_ctrl *bc = domain->bc;
+ int ret;
+
+ /* make sure bus domain is awake */
+ ret = pm_runtime_resume_and_get(bc->bus_power_dev);
+ if (ret < 0) {
+ dev_err(bc->dev, "failed to power up bus domain\n");
+ return ret;
+ }
+
+ /* enable upstream clocks */
+ ret = clk_bulk_prepare_enable(data->num_clks, domain->clks);
+ if (ret) {
+ dev_err(bc->dev, "failed to enable clocks\n");
+ goto bus_put;
+ }
+
+ /* domain specific blk-ctrl manipulation */
+ bc->power_on(bc, domain);
+
+ /* power up upstream GPC domain */
+ ret = pm_runtime_resume_and_get(domain->power_dev);
+ if (ret < 0) {
+ dev_err(bc->dev, "failed to power up peripheral domain\n");
+ goto clk_disable;
+ }
+
+ clk_bulk_disable_unprepare(data->num_clks, domain->clks);
+
+ return 0;
+
+clk_disable:
+ clk_bulk_disable_unprepare(data->num_clks, domain->clks);
+bus_put:
+ pm_runtime_put(bc->bus_power_dev);
+
+ return ret;
+}
+
+static int imx8mp_blk_ctrl_power_off(struct generic_pm_domain *genpd)
+{
+ struct imx8mp_blk_ctrl_domain *domain = to_imx8mp_blk_ctrl_domain(genpd);
+ const struct imx8mp_blk_ctrl_domain_data *data = domain->data;
+ struct imx8mp_blk_ctrl *bc = domain->bc;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(data->num_clks, domain->clks);
+ if (ret) {
+ dev_err(bc->dev, "failed to enable clocks\n");
+ return ret;
+ }
+
+ /* domain specific blk-ctrl manipulation */
+ bc->power_off(bc, domain);
+
+ clk_bulk_disable_unprepare(data->num_clks, domain->clks);
+
+ /* power down upstream GPC domain */
+ pm_runtime_put(domain->power_dev);
+
+ /* allow bus domain to suspend */
+ pm_runtime_put(bc->bus_power_dev);
+
+ return 0;
+}
+
+static struct generic_pm_domain *
+imx8m_blk_ctrl_xlate(struct of_phandle_args *args, void *data)
+{
+ struct genpd_onecell_data *onecell_data = data;
+ unsigned int index = args->args[0];
+
+ if (args->args_count != 1 ||
+ index >= onecell_data->num_domains)
+ return ERR_PTR(-EINVAL);
+
+ return onecell_data->domains[index];
+}
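+
+/*
+ * Illustrative example (not part of the driver): for a consumer specifier
+ * such as <&hdmi_blk IMX8MP_HDMIBLK_PD_LCDIF>, args->args_count is 1 and
+ * args->args[0] carries the domain index, so the xlate above returns
+ * onecell_data->domains[IMX8MP_HDMIBLK_PD_LCDIF].
+ */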
+
+static struct lock_class_key blk_ctrl_genpd_lock_class;
+
+static int imx8mp_blk_ctrl_probe(struct platform_device *pdev)
+{
+ const struct imx8mp_blk_ctrl_data *bc_data;
+ struct device *dev = &pdev->dev;
+ struct imx8mp_blk_ctrl *bc;
+ void __iomem *base;
+ int num_domains, i, ret;
+
+ struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ };
+
+ bc = devm_kzalloc(dev, sizeof(*bc), GFP_KERNEL);
+ if (!bc)
+ return -ENOMEM;
+
+ bc->dev = dev;
+
+ bc_data = of_device_get_match_data(dev);
+ num_domains = bc_data->num_domains;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap_config.max_register = bc_data->max_reg;
+ bc->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
+ if (IS_ERR(bc->regmap))
+ return dev_err_probe(dev, PTR_ERR(bc->regmap),
+ "failed to init regmap\n");
+
+ bc->domains = devm_kcalloc(dev, num_domains,
+ sizeof(struct imx8mp_blk_ctrl_domain),
+ GFP_KERNEL);
+ if (!bc->domains)
+ return -ENOMEM;
+
+ bc->onecell_data.num_domains = num_domains;
+ bc->onecell_data.xlate = imx8m_blk_ctrl_xlate;
+ bc->onecell_data.domains =
+ devm_kcalloc(dev, num_domains,
+ sizeof(struct generic_pm_domain *), GFP_KERNEL);
+ if (!bc->onecell_data.domains)
+ return -ENOMEM;
+
+ bc->bus_power_dev = genpd_dev_pm_attach_by_name(dev, "bus");
+ if (IS_ERR(bc->bus_power_dev))
+ return dev_err_probe(dev, PTR_ERR(bc->bus_power_dev),
+ "failed to attach bus power domain\n");
+
+ bc->power_off = bc_data->power_off;
+ bc->power_on = bc_data->power_on;
+
+ for (i = 0; i < num_domains; i++) {
+ const struct imx8mp_blk_ctrl_domain_data *data = &bc_data->domains[i];
+ struct imx8mp_blk_ctrl_domain *domain = &bc->domains[i];
+ int j;
+
+ domain->data = data;
+
+ for (j = 0; j < data->num_clks; j++)
+ domain->clks[j].id = data->clk_names[j];
+
+ ret = devm_clk_bulk_get(dev, data->num_clks, domain->clks);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to get clock\n");
+ goto cleanup_pds;
+ }
+
+ domain->power_dev =
+ dev_pm_domain_attach_by_name(dev, data->gpc_name);
+ if (IS_ERR(domain->power_dev)) {
+ dev_err_probe(dev, PTR_ERR(domain->power_dev),
+ "failed to attach power domain %s\n",
+ data->gpc_name);
+ ret = PTR_ERR(domain->power_dev);
+ goto cleanup_pds;
+ }
+ dev_set_name(domain->power_dev, "%s", data->name);
+
+ domain->genpd.name = data->name;
+ domain->genpd.power_on = imx8mp_blk_ctrl_power_on;
+ domain->genpd.power_off = imx8mp_blk_ctrl_power_off;
+ domain->bc = bc;
+ domain->id = i;
+
+ ret = pm_genpd_init(&domain->genpd, NULL, true);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to init power domain\n");
+ dev_pm_domain_detach(domain->power_dev, true);
+ goto cleanup_pds;
+ }
+
+ /*
+ * We use runtime PM to trigger power on/off of the upstream GPC
+ * domain, as a strict hierarchical parent/child power domain
+ * setup doesn't allow us to meet the sequencing requirements.
+ * This means we have nested locking of genpd locks, without the
+ * nesting being visible at the genpd level, so we need a
+ * separate lock class to make lockdep aware of the fact that
+	 * these are separate domain locks that can be nested without a
+ * self-deadlock.
+ */
+ lockdep_set_class(&domain->genpd.mlock,
+ &blk_ctrl_genpd_lock_class);
+
+ bc->onecell_data.domains[i] = &domain->genpd;
+ }
+
+ ret = of_genpd_add_provider_onecell(dev->of_node, &bc->onecell_data);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to add power domain provider\n");
+ goto cleanup_pds;
+ }
+
+ bc->power_nb.notifier_call = bc_data->power_notifier_fn;
+ ret = dev_pm_genpd_add_notifier(bc->bus_power_dev, &bc->power_nb);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to add power notifier\n");
+ goto cleanup_provider;
+ }
+
+ dev_set_drvdata(dev, bc);
+
+ return 0;
+
+cleanup_provider:
+ of_genpd_del_provider(dev->of_node);
+cleanup_pds:
+ for (i--; i >= 0; i--) {
+ pm_genpd_remove(&bc->domains[i].genpd);
+ dev_pm_domain_detach(bc->domains[i].power_dev, true);
+ }
+
+ dev_pm_domain_detach(bc->bus_power_dev, true);
+
+ return ret;
+}
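+
+/*
+ * A minimal sketch of the lockdep pattern used in the probe loop above,
+ * assuming two mutexes that intentionally nest. Locks sharing one class
+ * key would trigger a false "possible recursive locking" report, so the
+ * nested lock gets its own key:
+ *
+ *	static struct lock_class_key child_key;
+ *
+ *	lockdep_set_class(&child->lock, &child_key);
+ *	mutex_lock(&parent->lock);
+ *	mutex_lock(&child->lock);	// distinct classes, no false report
+ *	mutex_unlock(&child->lock);
+ *	mutex_unlock(&parent->lock);
+ */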
+
+static int imx8mp_blk_ctrl_remove(struct platform_device *pdev)
+{
+ struct imx8mp_blk_ctrl *bc = dev_get_drvdata(&pdev->dev);
+ int i;
+
+ of_genpd_del_provider(pdev->dev.of_node);
+
+	for (i = 0; i < bc->onecell_data.num_domains; i++) {
+ struct imx8mp_blk_ctrl_domain *domain = &bc->domains[i];
+
+ pm_genpd_remove(&domain->genpd);
+ dev_pm_domain_detach(domain->power_dev, true);
+ }
+
+ dev_pm_genpd_remove_notifier(bc->bus_power_dev);
+
+ dev_pm_domain_detach(bc->bus_power_dev, true);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int imx8mp_blk_ctrl_suspend(struct device *dev)
+{
+ struct imx8mp_blk_ctrl *bc = dev_get_drvdata(dev);
+ int ret, i;
+
+ /*
+ * This may look strange, but is done so the generic PM_SLEEP code
+ * can power down our domains and more importantly power them up again
+ * after resume, without tripping over our usage of runtime PM to
+ * control the upstream GPC domains. Things happen in the right order
+ * in the system suspend/resume paths due to the device parent/child
+ * hierarchy.
+ */
+ ret = pm_runtime_get_sync(bc->bus_power_dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(bc->bus_power_dev);
+ return ret;
+ }
+
+ for (i = 0; i < bc->onecell_data.num_domains; i++) {
+ struct imx8mp_blk_ctrl_domain *domain = &bc->domains[i];
+
+ ret = pm_runtime_get_sync(domain->power_dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(domain->power_dev);
+ goto out_fail;
+ }
+ }
+
+ return 0;
+
+out_fail:
+ for (i--; i >= 0; i--)
+ pm_runtime_put(bc->domains[i].power_dev);
+
+ pm_runtime_put(bc->bus_power_dev);
+
+ return ret;
+}
+
+static int imx8mp_blk_ctrl_resume(struct device *dev)
+{
+ struct imx8mp_blk_ctrl *bc = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < bc->onecell_data.num_domains; i++)
+ pm_runtime_put(bc->domains[i].power_dev);
+
+ pm_runtime_put(bc->bus_power_dev);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops imx8mp_blk_ctrl_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(imx8mp_blk_ctrl_suspend,
+ imx8mp_blk_ctrl_resume)
+};
+
+static const struct of_device_id imx8mp_blk_ctrl_of_match[] = {
+ {
+ .compatible = "fsl,imx8mp-hsio-blk-ctrl",
+ .data = &imx8mp_hsio_blk_ctl_dev_data,
+ }, {
+ .compatible = "fsl,imx8mp-hdmi-blk-ctrl",
+ .data = &imx8mp_hdmi_blk_ctl_dev_data,
+ }, {
+ /* Sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, imx8mp_blk_ctrl_of_match);
+
+static struct platform_driver imx8mp_blk_ctrl_driver = {
+ .probe = imx8mp_blk_ctrl_probe,
+ .remove = imx8mp_blk_ctrl_remove,
+ .driver = {
+ .name = "imx8mp-blk-ctrl",
+ .pm = &imx8mp_blk_ctrl_pm_ops,
+ .of_match_table = imx8mp_blk_ctrl_of_match,
+ },
+};
+module_platform_driver(imx8mp_blk_ctrl_driver);
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index eecafeded56f..4b143cf7b4ce 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -130,6 +130,50 @@ static const struct llcc_slice_config sc7280_data[] = {
{ LLCC_MODPE, 29, 64, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
};
+static const struct llcc_slice_config sc8180x_data[] = {
+ { LLCC_CPUSS, 1, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 1 },
+ { LLCC_VIDSC0, 2, 512, 2, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_VIDSC1, 3, 512, 2, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_AUDIO, 6, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMHPGRW, 7, 3072, 1, 1, 0x3ff, 0xc00, 0, 0, 0, 1, 0 },
+ { LLCC_MDM, 8, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MODHW, 9, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_CMPT, 10, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPU, 12, 5120, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1 },
+ { LLCC_CMPTDMA, 15, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_DISP, 16, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_VIDFW, 17, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMHPFX, 20, 1024, 2, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMPNG, 21, 1024, 0, 1, 0xc, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_AUDHW, 22, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_NPU, 23, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_WLHW, 24, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MODPE, 29, 512, 1, 1, 0xc, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_APTCM, 30, 512, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0 },
+ { LLCC_WRCACHE, 31, 128, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0 },
+};
+
+static const struct llcc_slice_config sc8280xp_data[] = {
+ { LLCC_CPUSS, 1, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 },
+ { LLCC_VIDSC0, 2, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_AUDIO, 6, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
+ { LLCC_CMPT, 10, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
+ { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_GPU, 12, 4096, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 1 },
+ { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_DISP, 16, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_AUDHW, 22, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_DRE, 26, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_CVP, 28, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0, 0 },
+ { LLCC_WRCACHE, 31, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_CVPFW, 32, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_CPUSS1, 33, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_CPUHWT, 36, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+};
+
static const struct llcc_slice_config sdm845_data[] = {
{ LLCC_CPUSS, 1, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 1 },
{ LLCC_VIDSC0, 2, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0 },
@@ -276,6 +320,20 @@ static const struct qcom_llcc_config sc7280_cfg = {
.reg_offset = llcc_v1_2_reg_offset,
};
+static const struct qcom_llcc_config sc8180x_cfg = {
+ .sct_data = sc8180x_data,
+ .size = ARRAY_SIZE(sc8180x_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_2_reg_offset,
+};
+
+static const struct qcom_llcc_config sc8280xp_cfg = {
+ .sct_data = sc8280xp_data,
+ .size = ARRAY_SIZE(sc8280xp_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_2_reg_offset,
+};
+
static const struct qcom_llcc_config sdm845_cfg = {
.sct_data = sdm845_data,
.size = ARRAY_SIZE(sdm845_data),
@@ -741,6 +799,8 @@ err:
static const struct of_device_id qcom_llcc_of_match[] = {
{ .compatible = "qcom,sc7180-llcc", .data = &sc7180_cfg },
{ .compatible = "qcom,sc7280-llcc", .data = &sc7280_cfg },
+ { .compatible = "qcom,sc8180x-llcc", .data = &sc8180x_cfg },
+ { .compatible = "qcom,sc8280xp-llcc", .data = &sc8280xp_cfg },
{ .compatible = "qcom,sdm845-llcc", .data = &sdm845_cfg },
{ .compatible = "qcom,sm6350-llcc", .data = &sm6350_cfg },
{ .compatible = "qcom,sm8150-llcc", .data = &sm8150_cfg },
@@ -749,6 +809,7 @@ static const struct of_device_id qcom_llcc_of_match[] = {
{ .compatible = "qcom,sm8450-llcc", .data = &sm8450_cfg },
{ }
};
+MODULE_DEVICE_TABLE(of, qcom_llcc_of_match);
static struct platform_driver qcom_llcc_driver = {
.driver = {
diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
index fc580a3c4336..0034af927b48 100644
--- a/drivers/soc/qcom/pdr_interface.c
+++ b/drivers/soc/qcom/pdr_interface.c
@@ -304,24 +304,23 @@ static void pdr_indication_cb(struct qmi_handle *qmi,
notifier_hdl);
const struct servreg_state_updated_ind *ind_msg = data;
struct pdr_list_node *ind;
- struct pdr_service *pds;
- bool found = false;
+ struct pdr_service *pds = NULL, *iter;
if (!ind_msg || !ind_msg->service_path[0] ||
strlen(ind_msg->service_path) > SERVREG_NAME_LENGTH)
return;
mutex_lock(&pdr->list_lock);
- list_for_each_entry(pds, &pdr->lookups, node) {
- if (strcmp(pds->service_path, ind_msg->service_path))
+ list_for_each_entry(iter, &pdr->lookups, node) {
+ if (strcmp(iter->service_path, ind_msg->service_path))
continue;
- found = true;
+ pds = iter;
break;
}
mutex_unlock(&pdr->list_lock);
- if (!found)
+ if (!pds)
return;
pr_info("PDR: Indication received from %s, state: 0x%x, trans-id: %d\n",
diff --git a/drivers/soc/qcom/pdr_internal.h b/drivers/soc/qcom/pdr_internal.h
index ab9ae8cdfa54..a30422214943 100644
--- a/drivers/soc/qcom/pdr_internal.h
+++ b/drivers/soc/qcom/pdr_internal.h
@@ -28,7 +28,7 @@ struct servreg_location_entry {
u32 instance;
};
-struct qmi_elem_info servreg_location_entry_ei[] = {
+static struct qmi_elem_info servreg_location_entry_ei[] = {
{
.data_type = QMI_STRING,
.elem_len = SERVREG_NAME_LENGTH + 1,
@@ -74,7 +74,7 @@ struct servreg_get_domain_list_req {
u32 domain_offset;
};
-struct qmi_elem_info servreg_get_domain_list_req_ei[] = {
+static struct qmi_elem_info servreg_get_domain_list_req_ei[] = {
{
.data_type = QMI_STRING,
.elem_len = SERVREG_NAME_LENGTH + 1,
@@ -116,7 +116,7 @@ struct servreg_get_domain_list_resp {
struct servreg_location_entry domain_list[SERVREG_DOMAIN_LIST_LENGTH];
};
-struct qmi_elem_info servreg_get_domain_list_resp_ei[] = {
+static struct qmi_elem_info servreg_get_domain_list_resp_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -199,7 +199,7 @@ struct servreg_register_listener_req {
char service_path[SERVREG_NAME_LENGTH + 1];
};
-struct qmi_elem_info servreg_register_listener_req_ei[] = {
+static struct qmi_elem_info servreg_register_listener_req_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
@@ -227,7 +227,7 @@ struct servreg_register_listener_resp {
enum servreg_service_state curr_state;
};
-struct qmi_elem_info servreg_register_listener_resp_ei[] = {
+static struct qmi_elem_info servreg_register_listener_resp_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -263,7 +263,7 @@ struct servreg_restart_pd_req {
char service_path[SERVREG_NAME_LENGTH + 1];
};
-struct qmi_elem_info servreg_restart_pd_req_ei[] = {
+static struct qmi_elem_info servreg_restart_pd_req_ei[] = {
{
.data_type = QMI_STRING,
.elem_len = SERVREG_NAME_LENGTH + 1,
@@ -280,7 +280,7 @@ struct servreg_restart_pd_resp {
struct qmi_response_type_v01 resp;
};
-struct qmi_elem_info servreg_restart_pd_resp_ei[] = {
+static struct qmi_elem_info servreg_restart_pd_resp_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -300,7 +300,7 @@ struct servreg_state_updated_ind {
u16 transaction_id;
};
-struct qmi_elem_info servreg_state_updated_ind_ei[] = {
+static struct qmi_elem_info servreg_state_updated_ind_ei[] = {
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
@@ -336,7 +336,7 @@ struct servreg_set_ack_req {
u16 transaction_id;
};
-struct qmi_elem_info servreg_set_ack_req_ei[] = {
+static struct qmi_elem_info servreg_set_ack_req_ei[] = {
{
.data_type = QMI_STRING,
.elem_len = SERVREG_NAME_LENGTH + 1,
@@ -362,7 +362,7 @@ struct servreg_set_ack_resp {
struct qmi_response_type_v01 resp;
};
-struct qmi_elem_info servreg_set_ack_resp_ei[] = {
+static struct qmi_elem_info servreg_set_ack_resp_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
index 58f1dc9b9cb7..05fff8691ee3 100644
--- a/drivers/soc/qcom/rpmhpd.c
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -180,6 +180,36 @@ static struct rpmhpd mxc_ao = {
.res_name = "mxc.lvl",
};
+static struct rpmhpd nsp = {
+ .pd = { .name = "nsp", },
+ .res_name = "nsp.lvl",
+};
+
+static struct rpmhpd qphy = {
+ .pd = { .name = "qphy", },
+ .res_name = "qphy.lvl",
+};
+
+/* SA8540P RPMH powerdomains */
+static struct rpmhpd *sa8540p_rpmhpds[] = {
+ [SC8280XP_CX] = &cx,
+ [SC8280XP_CX_AO] = &cx_ao,
+ [SC8280XP_EBI] = &ebi,
+ [SC8280XP_GFX] = &gfx,
+ [SC8280XP_LCX] = &lcx,
+ [SC8280XP_LMX] = &lmx,
+ [SC8280XP_MMCX] = &mmcx,
+ [SC8280XP_MMCX_AO] = &mmcx_ao,
+ [SC8280XP_MX] = &mx,
+ [SC8280XP_MX_AO] = &mx_ao,
+ [SC8280XP_NSP] = &nsp,
+};
+
+static const struct rpmhpd_desc sa8540p_desc = {
+ .rpmhpds = sa8540p_rpmhpds,
+ .num_pds = ARRAY_SIZE(sa8540p_rpmhpds),
+};
+
/* SDM845 RPMH powerdomains */
static struct rpmhpd *sdm845_rpmhpds[] = {
[SDM845_CX] = &cx_w_mx_parent,
@@ -210,6 +240,21 @@ static const struct rpmhpd_desc sdx55_desc = {
.num_pds = ARRAY_SIZE(sdx55_rpmhpds),
};
+/* SDX65 RPMH powerdomains */
+static struct rpmhpd *sdx65_rpmhpds[] = {
+ [SDX65_CX] = &cx_w_mx_parent,
+ [SDX65_CX_AO] = &cx_ao_w_mx_parent,
+ [SDX65_MSS] = &mss,
+ [SDX65_MX] = &mx,
+ [SDX65_MX_AO] = &mx_ao,
+ [SDX65_MXC] = &mxc,
+};
+
+static const struct rpmhpd_desc sdx65_desc = {
+ .rpmhpds = sdx65_rpmhpds,
+ .num_pds = ARRAY_SIZE(sdx65_rpmhpds),
+};
+
/* SM6350 RPMH powerdomains */
static struct rpmhpd *sm6350_rpmhpds[] = {
[SM6350_CX] = &cx_w_mx_parent,
@@ -363,12 +408,36 @@ static const struct rpmhpd_desc sc8180x_desc = {
.num_pds = ARRAY_SIZE(sc8180x_rpmhpds),
};
+/* SC8280xp RPMH powerdomains */
+static struct rpmhpd *sc8280xp_rpmhpds[] = {
+ [SC8280XP_CX] = &cx,
+ [SC8280XP_CX_AO] = &cx_ao,
+ [SC8280XP_EBI] = &ebi,
+ [SC8280XP_GFX] = &gfx,
+ [SC8280XP_LCX] = &lcx,
+ [SC8280XP_LMX] = &lmx,
+ [SC8280XP_MMCX] = &mmcx,
+ [SC8280XP_MMCX_AO] = &mmcx_ao,
+ [SC8280XP_MX] = &mx,
+ [SC8280XP_MX_AO] = &mx_ao,
+ [SC8280XP_NSP] = &nsp,
+ [SC8280XP_QPHY] = &qphy,
+};
+
+static const struct rpmhpd_desc sc8280xp_desc = {
+ .rpmhpds = sc8280xp_rpmhpds,
+ .num_pds = ARRAY_SIZE(sc8280xp_rpmhpds),
+};
+
static const struct of_device_id rpmhpd_match_table[] = {
+ { .compatible = "qcom,sa8540p-rpmhpd", .data = &sa8540p_desc },
{ .compatible = "qcom,sc7180-rpmhpd", .data = &sc7180_desc },
{ .compatible = "qcom,sc7280-rpmhpd", .data = &sc7280_desc },
{ .compatible = "qcom,sc8180x-rpmhpd", .data = &sc8180x_desc },
+ { .compatible = "qcom,sc8280xp-rpmhpd", .data = &sc8280xp_desc },
{ .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
{ .compatible = "qcom,sdx55-rpmhpd", .data = &sdx55_desc},
+ { .compatible = "qcom,sdx65-rpmhpd", .data = &sdx65_desc},
{ .compatible = "qcom,sm6350-rpmhpd", .data = &sm6350_desc },
{ .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc },
{ .compatible = "qcom,sm8250-rpmhpd", .data = &sm8250_desc },
@@ -597,10 +666,8 @@ static int rpmhpd_probe(struct platform_device *pdev)
data->num_domains = num_pds;
for (i = 0; i < num_pds; i++) {
- if (!rpmhpds[i]) {
- dev_warn(dev, "rpmhpds[%d] is empty\n", i);
+ if (!rpmhpds[i])
continue;
- }
rpmhpds[i]->dev = dev;
rpmhpds[i]->addr = cmd_db_read_addr(rpmhpds[i]->res_name);
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index e2057d8f1eff..3e95835653ea 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -195,6 +195,20 @@ struct smem_partition_header {
__le32 reserved[3];
};
+/**
+ * struct smem_partition - describes smem partition
+ * @virt_base: starting virtual address of partition
+ * @phys_base: starting physical address of partition
+ * @cacheline: alignment for "cached" entries
+ * @size: size of partition
+ */
+struct smem_partition {
+ void __iomem *virt_base;
+ phys_addr_t phys_base;
+ size_t cacheline;
+ size_t size;
+};
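+
+/*
+ * Tracking the size and physical base of each mapped partition lets the
+ * accessors below bounds-check pointers derived from headers that remote
+ * processors can write, and translate such pointers back to physical
+ * addresses.
+ */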
+
static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 };
/**
@@ -250,11 +264,9 @@ struct smem_region {
* struct qcom_smem - device data for the smem device
* @dev: device pointer
* @hwlock: reference to a hwspinlock
- * @global_partition: pointer to global partition when in use
- * @global_cacheline: cacheline size for global partition
- * @partitions: list of pointers to partitions affecting the current
- * processor/host
- * @cacheline: list of cacheline sizes for each host
+ * @ptable: virtual base of partition table
+ * @global_partition: describes the global partition when in use
+ * @partitions: list of partitions of current processor/host
* @item_count: max accepted item number
* @socinfo: platform device pointer
* @num_regions: number of @regions
@@ -265,12 +277,11 @@ struct qcom_smem {
struct hwspinlock *hwlock;
- struct smem_partition_header *global_partition;
- size_t global_cacheline;
- struct smem_partition_header *partitions[SMEM_HOST_COUNT];
- size_t cacheline[SMEM_HOST_COUNT];
u32 item_count;
struct platform_device *socinfo;
+ struct smem_ptable *ptable;
+ struct smem_partition global_partition;
+ struct smem_partition partitions[SMEM_HOST_COUNT];
unsigned num_regions;
struct smem_region regions[];
@@ -348,18 +359,26 @@ static struct qcom_smem *__smem;
#define HWSPINLOCK_TIMEOUT 1000
static int qcom_smem_alloc_private(struct qcom_smem *smem,
- struct smem_partition_header *phdr,
+ struct smem_partition *part,
unsigned item,
size_t size)
{
struct smem_private_entry *hdr, *end;
+ struct smem_partition_header *phdr;
size_t alloc_size;
void *cached;
+ void *p_end;
+
+ phdr = (struct smem_partition_header __force *)part->virt_base;
+ p_end = (void *)phdr + part->size;
hdr = phdr_to_first_uncached_entry(phdr);
end = phdr_to_last_uncached_entry(phdr);
cached = phdr_to_last_cached_entry(phdr);
+ if (WARN_ON((void *)end > p_end || cached > p_end))
+ return -EINVAL;
+
while (hdr < end) {
if (hdr->canary != SMEM_PRIVATE_CANARY)
goto bad_canary;
@@ -369,6 +388,9 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
hdr = uncached_entry_next(hdr);
}
+ if (WARN_ON((void *)hdr > p_end))
+ return -EINVAL;
+
/* Check that we don't grow into the cached region */
alloc_size = sizeof(*hdr) + ALIGN(size, 8);
if ((void *)hdr + alloc_size > cached) {
@@ -442,7 +464,7 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
*/
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
- struct smem_partition_header *phdr;
+ struct smem_partition *part;
unsigned long flags;
int ret;
@@ -464,12 +486,12 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
if (ret)
return ret;
- if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
- phdr = __smem->partitions[host];
- ret = qcom_smem_alloc_private(__smem, phdr, item, size);
- } else if (__smem->global_partition) {
- phdr = __smem->global_partition;
- ret = qcom_smem_alloc_private(__smem, phdr, item, size);
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
+ part = &__smem->partitions[host];
+ ret = qcom_smem_alloc_private(__smem, part, item, size);
+ } else if (__smem->global_partition.virt_base) {
+ part = &__smem->global_partition;
+ ret = qcom_smem_alloc_private(__smem, part, item, size);
} else {
ret = qcom_smem_alloc_global(__smem, item, size);
}
@@ -487,6 +509,8 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
struct smem_header *header;
struct smem_region *region;
struct smem_global_entry *entry;
+ u64 entry_offset;
+ u32 e_size;
u32 aux_base;
unsigned i;
@@ -501,9 +525,16 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
region = &smem->regions[i];
if ((u32)region->aux_base == aux_base || !aux_base) {
+ e_size = le32_to_cpu(entry->size);
+ entry_offset = le32_to_cpu(entry->offset);
+
+ if (WARN_ON(e_size + entry_offset > region->size))
+ return ERR_PTR(-EINVAL);
+
if (size != NULL)
- *size = le32_to_cpu(entry->size);
- return region->virt_base + le32_to_cpu(entry->offset);
+ *size = e_size;
+
+ return region->virt_base + entry_offset;
}
}
@@ -511,12 +542,18 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
}
static void *qcom_smem_get_private(struct qcom_smem *smem,
- struct smem_partition_header *phdr,
- size_t cacheline,
+ struct smem_partition *part,
unsigned item,
size_t *size)
{
struct smem_private_entry *e, *end;
+ struct smem_partition_header *phdr;
+ void *item_ptr, *p_end;
+ u32 padding_data;
+ u32 e_size;
+
+ phdr = (struct smem_partition_header __force *)part->virt_base;
+ p_end = (void *)phdr + part->size;
e = phdr_to_first_uncached_entry(phdr);
end = phdr_to_last_uncached_entry(phdr);
@@ -526,36 +563,65 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
goto invalid_canary;
if (le16_to_cpu(e->item) == item) {
- if (size != NULL)
- *size = le32_to_cpu(e->size) -
- le16_to_cpu(e->padding_data);
+ if (size != NULL) {
+ e_size = le32_to_cpu(e->size);
+ padding_data = le16_to_cpu(e->padding_data);
- return uncached_entry_to_item(e);
+ if (WARN_ON(e_size > part->size || padding_data > e_size))
+ return ERR_PTR(-EINVAL);
+
+ *size = e_size - padding_data;
+ }
+
+ item_ptr = uncached_entry_to_item(e);
+ if (WARN_ON(item_ptr > p_end))
+ return ERR_PTR(-EINVAL);
+
+ return item_ptr;
}
e = uncached_entry_next(e);
}
+ if (WARN_ON((void *)e > p_end))
+ return ERR_PTR(-EINVAL);
+
/* Item was not found in the uncached list, search the cached list */
- e = phdr_to_first_cached_entry(phdr, cacheline);
+ e = phdr_to_first_cached_entry(phdr, part->cacheline);
end = phdr_to_last_cached_entry(phdr);
+ if (WARN_ON((void *)e < (void *)phdr || (void *)end > p_end))
+ return ERR_PTR(-EINVAL);
+
while (e > end) {
if (e->canary != SMEM_PRIVATE_CANARY)
goto invalid_canary;
if (le16_to_cpu(e->item) == item) {
- if (size != NULL)
- *size = le32_to_cpu(e->size) -
- le16_to_cpu(e->padding_data);
+ if (size != NULL) {
+ e_size = le32_to_cpu(e->size);
+ padding_data = le16_to_cpu(e->padding_data);
+
+ if (WARN_ON(e_size > part->size || padding_data > e_size))
+ return ERR_PTR(-EINVAL);
+
+ *size = e_size - padding_data;
+ }
- return cached_entry_to_item(e);
+ item_ptr = cached_entry_to_item(e);
+ if (WARN_ON(item_ptr < (void *)phdr))
+ return ERR_PTR(-EINVAL);
+
+ return item_ptr;
}
- e = cached_entry_next(e, cacheline);
+ e = cached_entry_next(e, part->cacheline);
}
+ if (WARN_ON((void *)e < (void *)phdr))
+ return ERR_PTR(-EINVAL);
+
return ERR_PTR(-ENOENT);
invalid_canary:
@@ -576,9 +642,8 @@ invalid_canary:
*/
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
- struct smem_partition_header *phdr;
+ struct smem_partition *part;
unsigned long flags;
- size_t cacheln;
int ret;
void *ptr = ERR_PTR(-EPROBE_DEFER);
@@ -594,14 +659,12 @@ void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
if (ret)
return ERR_PTR(ret);
- if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
- phdr = __smem->partitions[host];
- cacheln = __smem->cacheline[host];
- ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
- } else if (__smem->global_partition) {
- phdr = __smem->global_partition;
- cacheln = __smem->global_cacheline;
- ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
+ part = &__smem->partitions[host];
+ ptr = qcom_smem_get_private(__smem, part, item, size);
+ } else if (__smem->global_partition.virt_base) {
+ part = &__smem->global_partition;
+ ptr = qcom_smem_get_private(__smem, part, item, size);
} else {
ptr = qcom_smem_get_global(__smem, item, size);
}
@@ -622,6 +685,7 @@ EXPORT_SYMBOL(qcom_smem_get);
*/
int qcom_smem_get_free_space(unsigned host)
{
+ struct smem_partition *part;
struct smem_partition_header *phdr;
struct smem_header *header;
unsigned ret;
@@ -629,23 +693,39 @@ int qcom_smem_get_free_space(unsigned host)
if (!__smem)
return -EPROBE_DEFER;
- if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
- phdr = __smem->partitions[host];
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
+ part = &__smem->partitions[host];
+ phdr = part->virt_base;
ret = le32_to_cpu(phdr->offset_free_cached) -
le32_to_cpu(phdr->offset_free_uncached);
- } else if (__smem->global_partition) {
- phdr = __smem->global_partition;
+
+		if (ret > part->size)
+ return -EINVAL;
+ } else if (__smem->global_partition.virt_base) {
+ part = &__smem->global_partition;
+ phdr = part->virt_base;
ret = le32_to_cpu(phdr->offset_free_cached) -
le32_to_cpu(phdr->offset_free_uncached);
+
+		if (ret > part->size)
+ return -EINVAL;
} else {
header = __smem->regions[0].virt_base;
ret = le32_to_cpu(header->available);
+
+ if (ret > __smem->regions[0].size)
+ return -EINVAL;
}
return ret;
}
EXPORT_SYMBOL(qcom_smem_get_free_space);
+static bool addr_in_range(void __iomem *base, size_t size, void *addr)
+{
+ return base && (addr >= base && addr < base + size);
+}
+
/**
* qcom_smem_virt_to_phys() - return the physical address associated
* with an smem item pointer (previously returned by qcom_smem_get()
@@ -655,17 +735,36 @@ EXPORT_SYMBOL(qcom_smem_get_free_space);
*/
phys_addr_t qcom_smem_virt_to_phys(void *p)
{
- unsigned i;
+ struct smem_partition *part;
+ struct smem_region *area;
+ u64 offset;
+ u32 i;
+
+ for (i = 0; i < SMEM_HOST_COUNT; i++) {
+ part = &__smem->partitions[i];
+
+ if (addr_in_range(part->virt_base, part->size, p)) {
+ offset = p - part->virt_base;
+
+ return (phys_addr_t)part->phys_base + offset;
+ }
+ }
+
+ part = &__smem->global_partition;
+
+ if (addr_in_range(part->virt_base, part->size, p)) {
+ offset = p - part->virt_base;
+
+ return (phys_addr_t)part->phys_base + offset;
+ }
for (i = 0; i < __smem->num_regions; i++) {
- struct smem_region *region = &__smem->regions[i];
+ area = &__smem->regions[i];
- if (p < region->virt_base)
- continue;
- if (p < region->virt_base + region->size) {
- u64 offset = p - region->virt_base;
+ if (addr_in_range(area->virt_base, area->size, p)) {
+ offset = p - area->virt_base;
- return region->aux_base + offset;
+ return (phys_addr_t)area->aux_base + offset;
}
}
@@ -689,7 +788,7 @@ static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
struct smem_ptable *ptable;
u32 version;
- ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
+ ptable = smem->ptable;
if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
return ERR_PTR(-ENOENT);
@@ -728,9 +827,14 @@ qcom_smem_partition_header(struct qcom_smem *smem,
struct smem_ptable_entry *entry, u16 host0, u16 host1)
{
struct smem_partition_header *header;
+ u32 phys_addr;
u32 size;
- header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
+ phys_addr = smem->regions[0].aux_base + le32_to_cpu(entry->offset);
+ header = devm_ioremap_wc(smem->dev, phys_addr, le32_to_cpu(entry->size));
+
+ if (!header)
+ return NULL;
if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
dev_err(smem->dev, "bad partition magic %4ph\n", header->magic);
@@ -772,7 +876,7 @@ static int qcom_smem_set_global_partition(struct qcom_smem *smem)
bool found = false;
int i;
- if (smem->global_partition) {
+ if (smem->global_partition.virt_base) {
dev_err(smem->dev, "Already found the global partition\n");
return -EINVAL;
}
@@ -807,8 +911,11 @@ static int qcom_smem_set_global_partition(struct qcom_smem *smem)
if (!header)
return -EINVAL;
- smem->global_partition = header;
- smem->global_cacheline = le32_to_cpu(entry->cacheline);
+ smem->global_partition.virt_base = (void __iomem *)header;
+ smem->global_partition.phys_base = smem->regions[0].aux_base +
+ le32_to_cpu(entry->offset);
+ smem->global_partition.size = le32_to_cpu(entry->size);
+ smem->global_partition.cacheline = le32_to_cpu(entry->cacheline);
return 0;
}
@@ -848,7 +955,7 @@ qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
return -EINVAL;
}
- if (smem->partitions[remote_host]) {
+ if (smem->partitions[remote_host].virt_base) {
dev_err(smem->dev, "duplicate host %hu\n", remote_host);
return -EINVAL;
}
@@ -857,13 +964,47 @@ qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
if (!header)
return -EINVAL;
- smem->partitions[remote_host] = header;
- smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
+ smem->partitions[remote_host].virt_base = (void __iomem *)header;
+ smem->partitions[remote_host].phys_base = smem->regions[0].aux_base +
+ le32_to_cpu(entry->offset);
+ smem->partitions[remote_host].size = le32_to_cpu(entry->size);
+ smem->partitions[remote_host].cacheline = le32_to_cpu(entry->cacheline);
}
return 0;
}
+static int qcom_smem_map_toc(struct qcom_smem *smem, struct smem_region *region)
+{
+ u32 ptable_start;
+
+	/* map the first 4K for the smem header */
+ region->virt_base = devm_ioremap_wc(smem->dev, region->aux_base, SZ_4K);
+ ptable_start = region->aux_base + region->size - SZ_4K;
+	/* map the last 4K for the TOC */
+ smem->ptable = devm_ioremap_wc(smem->dev, ptable_start, SZ_4K);
+
+ if (!region->virt_base || !smem->ptable)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int qcom_smem_map_global(struct qcom_smem *smem, u32 size)
+{
+ u32 phys_addr;
+
+ phys_addr = smem->regions[0].aux_base;
+
+ smem->regions[0].size = size;
+ smem->regions[0].virt_base = devm_ioremap_wc(smem->dev, phys_addr, size);
+
+ if (!smem->regions[0].virt_base)
+ return -ENOMEM;
+
+ return 0;
+}
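+
+/*
+ * Illustrative layout assumed by the two helpers above (sizes not to
+ * scale):
+ *
+ *  aux_base                                      aux_base + size
+ *  |                                                           |
+ *  +-----------+-------------------------------+---------------+
+ *  | header    | partitions / heap ...         | ptable (TOC)  |
+ *  | (4K map)  |                               | (last 4K map) |
+ *  +-----------+-------------------------------+---------------+
+ *
+ * qcom_smem_map_toc() maps only the first and last 4K; the rest is
+ * mapped later, per partition or via qcom_smem_map_global().
+ */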
+
static int qcom_smem_resolve_mem(struct qcom_smem *smem, const char *name,
struct smem_region *region)
{
@@ -894,10 +1035,12 @@ static int qcom_smem_probe(struct platform_device *pdev)
struct smem_header *header;
struct reserved_mem *rmem;
struct qcom_smem *smem;
+ unsigned long flags;
size_t array_size;
int num_regions;
int hwlock_id;
u32 version;
+ u32 size;
int ret;
int i;
@@ -933,7 +1076,12 @@ static int qcom_smem_probe(struct platform_device *pdev)
return ret;
}
- for (i = 0; i < num_regions; i++) {
+
+ ret = qcom_smem_map_toc(smem, &smem->regions[0]);
+ if (ret)
+ return ret;
+
+ for (i = 1; i < num_regions; i++) {
smem->regions[i].virt_base = devm_ioremap_wc(&pdev->dev,
smem->regions[i].aux_base,
smem->regions[i].size);
@@ -950,7 +1098,30 @@ static int qcom_smem_probe(struct platform_device *pdev)
return -EINVAL;
}
+ hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
+ if (hwlock_id < 0) {
+ if (hwlock_id != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to retrieve hwlock\n");
+ return hwlock_id;
+ }
+
+ smem->hwlock = hwspin_lock_request_specific(hwlock_id);
+ if (!smem->hwlock)
+ return -ENXIO;
+
+ ret = hwspin_lock_timeout_irqsave(smem->hwlock, HWSPINLOCK_TIMEOUT, &flags);
+ if (ret)
+ return ret;
+ size = readl_relaxed(&header->available) + readl_relaxed(&header->free_offset);
+ hwspin_unlock_irqrestore(smem->hwlock, &flags);
+
version = qcom_smem_get_sbl_version(smem);
+ /*
+	 * The smem header mapping is required only in the heap version scheme,
+	 * so unmap it here. It will be remapped in qcom_smem_map_global() when
+	 * the whole region is mapped again.
+ */
+ devm_iounmap(smem->dev, smem->regions[0].virt_base);
switch (version >> 16) {
case SMEM_GLOBAL_PART_VERSION:
ret = qcom_smem_set_global_partition(smem);
@@ -959,6 +1130,7 @@ static int qcom_smem_probe(struct platform_device *pdev)
smem->item_count = qcom_smem_get_item_count(smem);
break;
case SMEM_GLOBAL_HEAP_VERSION:
+ qcom_smem_map_global(smem, size);
smem->item_count = SMEM_ITEM_COUNT;
break;
default:
@@ -971,17 +1143,6 @@ static int qcom_smem_probe(struct platform_device *pdev)
if (ret < 0 && ret != -ENOENT)
return ret;
- hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
- if (hwlock_id < 0) {
- if (hwlock_id != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to retrieve hwlock\n");
- return hwlock_id;
- }
-
- smem->hwlock = hwspin_lock_request_specific(hwlock_id);
- if (!smem->hwlock)
- return -ENXIO;
-
__smem = smem;
smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo",
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index 4a157240f419..59dbf4b61e6c 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -493,6 +493,7 @@ static int smp2p_parse_ipc(struct qcom_smp2p *smp2p)
}
smp2p->ipc_regmap = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
if (IS_ERR(smp2p->ipc_regmap))
return PTR_ERR(smp2p->ipc_regmap);
diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
index ef15d014c03a..9df9bba242f3 100644
--- a/drivers/soc/qcom/smsm.c
+++ b/drivers/soc/qcom/smsm.c
@@ -374,6 +374,7 @@ static int smsm_parse_ipc(struct qcom_smsm *smsm, unsigned host_id)
return 0;
host->ipc_regmap = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
if (IS_ERR(host->ipc_regmap))
return PTR_ERR(host->ipc_regmap);
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 8b38d134720a..cee579a267a6 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -236,24 +236,24 @@ static const struct soc_id soc_id[] = {
{ 184, "APQ8074" },
{ 185, "MSM8274" },
{ 186, "MSM8674" },
- { 194, "MSM8974PRO" },
+ { 194, "MSM8974PRO-AC" },
{ 198, "MSM8126" },
{ 199, "APQ8026" },
{ 200, "MSM8926" },
{ 205, "MSM8326" },
{ 206, "MSM8916" },
{ 207, "MSM8994" },
- { 208, "APQ8074-AA" },
- { 209, "APQ8074-AB" },
- { 210, "APQ8074PRO" },
- { 211, "MSM8274-AA" },
- { 212, "MSM8274-AB" },
- { 213, "MSM8274PRO" },
- { 214, "MSM8674-AA" },
- { 215, "MSM8674-AB" },
- { 216, "MSM8674PRO" },
- { 217, "MSM8974-AA" },
- { 218, "MSM8974-AB" },
+ { 208, "APQ8074PRO-AA" },
+ { 209, "APQ8074PRO-AB" },
+ { 210, "APQ8074PRO-AC" },
+ { 211, "MSM8274PRO-AA" },
+ { 212, "MSM8274PRO-AB" },
+ { 213, "MSM8274PRO-AC" },
+ { 214, "MSM8674PRO-AA" },
+ { 215, "MSM8674PRO-AB" },
+ { 216, "MSM8674PRO-AC" },
+ { 217, "MSM8974PRO-AA" },
+ { 218, "MSM8974PRO-AB" },
{ 219, "APQ8028" },
{ 220, "MSM8128" },
{ 221, "MSM8228" },
@@ -330,6 +330,8 @@ static const struct soc_id soc_id[] = {
{ 459, "SM7225" },
{ 460, "SA8540P" },
{ 480, "SM8450" },
+ { 482, "SM8450" },
+ { 487, "SC7280" },
};
static const char *socinfo_machine(struct device *dev, unsigned int id)
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index fdc99a05a7e0..c50a6ce1b99d 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -47,6 +47,8 @@ config ARCH_RZG2L
config ARCH_RZN1
bool
+ select PM
+ select PM_GENERIC_DOMAINS
select ARM_AMBA
if ARM && ARCH_RENESAS
@@ -268,6 +270,13 @@ config ARCH_R8A779A0
help
This enables support for the Renesas R-Car V3U SoC.
+config ARCH_R8A779G0
+ bool "ARM64 Platform support for R-Car V4H"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A779G0
+ help
+ This enables support for the Renesas R-Car V4H SoC.
+
config ARCH_R8A774C0
bool "ARM64 Platform support for RZ/G2E"
select ARCH_RCAR_GEN3
@@ -296,6 +305,12 @@ config ARCH_R8A774B1
help
This enables support for the Renesas RZ/G2N SoC.
+config ARCH_R9A07G043
+ bool "ARM64 Platform support for RZ/G2UL"
+ select ARCH_RZG2L
+ help
+ This enables support for the Renesas RZ/G2UL SoC variants.
+
config ARCH_R9A07G044
bool "ARM64 Platform support for RZ/G2L"
select ARCH_RZG2L
@@ -308,6 +323,13 @@ config ARCH_R9A07G054
help
This enables support for the Renesas RZ/V2L SoC variants.
+config ARCH_R9A09G011
+ bool "ARM64 Platform support for RZ/V2M"
+ select PM
+ select PM_GENERIC_DOMAINS
+ help
+ This enables support for the Renesas RZ/V2M SoC.
+
endif # ARM64
config RST_RCAR
@@ -379,6 +401,10 @@ config SYSC_R8A779A0
bool "System Controller support for R-Car V3U" if COMPILE_TEST
select SYSC_RCAR_GEN4
+config SYSC_R8A779G0
+ bool "System Controller support for R-Car V4H" if COMPILE_TEST
+ select SYSC_RCAR_GEN4
+
config SYSC_RMOBILE
bool "System Controller support for R-Mobile" if COMPILE_TEST
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index deeb41f84f01..535868c9c7e4 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_SYSC_R8A77990) += r8a77990-sysc.o
obj-$(CONFIG_SYSC_R8A77995) += r8a77995-sysc.o
obj-$(CONFIG_SYSC_R8A779A0) += r8a779a0-sysc.o
obj-$(CONFIG_SYSC_R8A779F0) += r8a779f0-sysc.o
+obj-$(CONFIG_SYSC_R8A779G0) += r8a779g0-sysc.o
ifdef CONFIG_SMP
obj-$(CONFIG_ARCH_R9A06G032) += r9a06g032-smp.o
endif
diff --git a/drivers/soc/renesas/r8a779g0-sysc.c b/drivers/soc/renesas/r8a779g0-sysc.c
new file mode 100644
index 000000000000..a452709f066d
--- /dev/null
+++ b/drivers/soc/renesas/r8a779g0-sysc.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas R-Car V4H System Controller
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ */
+
+#include <linux/bits.h>
+#include <linux/clk/renesas.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/of_address.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <dt-bindings/power/r8a779g0-sysc.h>
+
+#include "rcar-gen4-sysc.h"
+
+static struct rcar_gen4_sysc_area r8a779g0_areas[] __initdata = {
+ { "always-on", R8A779G0_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "a3e0", R8A779G0_PD_A3E0, R8A779G0_PD_ALWAYS_ON, PD_SCU },
+ { "a2e0d0", R8A779G0_PD_A2E0D0, R8A779G0_PD_A3E0, PD_SCU },
+ { "a2e0d1", R8A779G0_PD_A2E0D1, R8A779G0_PD_A3E0, PD_SCU },
+ { "a1e0d0c0", R8A779G0_PD_A1E0D0C0, R8A779G0_PD_A2E0D0, PD_CPU_NOCR },
+ { "a1e0d0c1", R8A779G0_PD_A1E0D0C1, R8A779G0_PD_A2E0D0, PD_CPU_NOCR },
+ { "a1e0d1c0", R8A779G0_PD_A1E0D1C0, R8A779G0_PD_A2E0D1, PD_CPU_NOCR },
+ { "a1e0d1c1", R8A779G0_PD_A1E0D1C1, R8A779G0_PD_A2E0D1, PD_CPU_NOCR },
+ { "a33dga", R8A779G0_PD_A33DGA, R8A779G0_PD_ALWAYS_ON },
+ { "a23dgb", R8A779G0_PD_A23DGB, R8A779G0_PD_A33DGA },
+ { "a3vip0", R8A779G0_PD_A3VIP0, R8A779G0_PD_ALWAYS_ON },
+ { "a3vip1", R8A779G0_PD_A3VIP1, R8A779G0_PD_ALWAYS_ON },
+ { "a3vip2", R8A779G0_PD_A3VIP2, R8A779G0_PD_ALWAYS_ON },
+ { "a3isp0", R8A779G0_PD_A3ISP0, R8A779G0_PD_ALWAYS_ON },
+ { "a3isp1", R8A779G0_PD_A3ISP1, R8A779G0_PD_ALWAYS_ON },
+ { "a3ir", R8A779G0_PD_A3IR, R8A779G0_PD_ALWAYS_ON },
+ { "a2cn0", R8A779G0_PD_A2CN0, R8A779G0_PD_A3IR },
+ { "a1cnn0", R8A779G0_PD_A1CNN0, R8A779G0_PD_A2CN0 },
+ { "a1dsp0", R8A779G0_PD_A1DSP0, R8A779G0_PD_A2CN0 },
+ { "a1dsp1", R8A779G0_PD_A1DSP1, R8A779G0_PD_A2CN0 },
+ { "a1dsp2", R8A779G0_PD_A1DSP2, R8A779G0_PD_A2CN0 },
+ { "a1dsp3", R8A779G0_PD_A1DSP3, R8A779G0_PD_A2CN0 },
+ { "a2imp01", R8A779G0_PD_A2IMP01, R8A779G0_PD_A3IR },
+ { "a2imp23", R8A779G0_PD_A2IMP23, R8A779G0_PD_A3IR },
+ { "a2psc", R8A779G0_PD_A2PSC, R8A779G0_PD_A3IR },
+ { "a2dma", R8A779G0_PD_A2DMA, R8A779G0_PD_A3IR },
+ { "a2cv0", R8A779G0_PD_A2CV0, R8A779G0_PD_A3IR },
+ { "a2cv1", R8A779G0_PD_A2CV1, R8A779G0_PD_A3IR },
+ { "a2cv2", R8A779G0_PD_A2CV2, R8A779G0_PD_A3IR },
+ { "a2cv3", R8A779G0_PD_A2CV3, R8A779G0_PD_A3IR },
+};
+
+const struct rcar_gen4_sysc_info r8a779g0_sysc_info __initconst = {
+ .areas = r8a779g0_areas,
+ .num_areas = ARRAY_SIZE(r8a779g0_areas),
+};
diff --git a/drivers/soc/renesas/rcar-gen4-sysc.c b/drivers/soc/renesas/rcar-gen4-sysc.c
index 831162a57f9a..9e5e6e077abc 100644
--- a/drivers/soc/renesas/rcar-gen4-sysc.c
+++ b/drivers/soc/renesas/rcar-gen4-sysc.c
@@ -282,6 +282,9 @@ static const struct of_device_id rcar_gen4_sysc_matches[] __initconst = {
#ifdef CONFIG_SYSC_R8A779F0
{ .compatible = "renesas,r8a779f0-sysc", .data = &r8a779f0_sysc_info },
#endif
+#ifdef CONFIG_SYSC_R8A779G0
+ { .compatible = "renesas,r8a779g0-sysc", .data = &r8a779g0_sysc_info },
+#endif
{ /* sentinel */ }
};
diff --git a/drivers/soc/renesas/rcar-gen4-sysc.h b/drivers/soc/renesas/rcar-gen4-sysc.h
index 0e0bd102b1f9..fe2d98254754 100644
--- a/drivers/soc/renesas/rcar-gen4-sysc.h
+++ b/drivers/soc/renesas/rcar-gen4-sysc.h
@@ -39,5 +39,6 @@ struct rcar_gen4_sysc_info {
extern const struct rcar_gen4_sysc_info r8a779a0_sysc_info;
extern const struct rcar_gen4_sysc_info r8a779f0_sysc_info;
+extern const struct rcar_gen4_sysc_info r8a779g0_sysc_info;
#endif /* __SOC_RENESAS_RCAR_GEN4_SYSC_H__ */
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
index 4d293eb2d8f3..e1c7e91f5a86 100644
--- a/drivers/soc/renesas/rcar-rst.c
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -103,6 +103,7 @@ static const struct of_device_id rcar_rst_matches[] __initconst = {
/* R-Car Gen4 */
{ .compatible = "renesas,r8a779a0-rst", .data = &rcar_rst_gen4 },
{ .compatible = "renesas,r8a779f0-rst", .data = &rcar_rst_gen4 },
+ { .compatible = "renesas,r8a779g0-rst", .data = &rcar_rst_gen4 },
{ /* sentinel */ }
};
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 92c7b42250ee..d171f1b635c7 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -64,6 +64,10 @@ static const struct renesas_family fam_rzg2l __initconst __maybe_unused = {
.name = "RZ/G2L",
};
+static const struct renesas_family fam_rzg2ul __initconst __maybe_unused = {
+ .name = "RZ/G2UL",
+};
+
static const struct renesas_family fam_rzv2l __initconst __maybe_unused = {
.name = "RZ/V2L",
};
@@ -148,6 +152,11 @@ static const struct renesas_soc soc_rz_g2l __initconst __maybe_unused = {
.id = 0x841c447,
};
+static const struct renesas_soc soc_rz_g2ul __initconst __maybe_unused = {
+ .family = &fam_rzg2ul,
+ .id = 0x8450447,
+};
+
static const struct renesas_soc soc_rz_v2l __initconst __maybe_unused = {
.family = &fam_rzv2l,
.id = 0x8447447,
@@ -223,7 +232,7 @@ static const struct renesas_soc soc_rcar_d3 __initconst __maybe_unused = {
};
static const struct renesas_soc soc_rcar_v3u __initconst __maybe_unused = {
- .family = &fam_rcar_gen3,
+ .family = &fam_rcar_gen4,
.id = 0x59,
};
@@ -232,6 +241,11 @@ static const struct renesas_soc soc_rcar_s4 __initconst __maybe_unused = {
.id = 0x5a,
};
+static const struct renesas_soc soc_rcar_v4h __initconst __maybe_unused = {
+ .family = &fam_rcar_gen4,
+ .id = 0x5c,
+};
+
static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = {
.family = &fam_shmobile,
.id = 0x37,
@@ -340,6 +354,12 @@ static const struct of_device_id renesas_socs[] __initconst = {
#ifdef CONFIG_ARCH_R8A779F0
{ .compatible = "renesas,r8a779f0", .data = &soc_rcar_s4 },
#endif
+#ifdef CONFIG_ARCH_R8A779G0
+ { .compatible = "renesas,r8a779g0", .data = &soc_rcar_v4h },
+#endif
+#if defined(CONFIG_ARCH_R9A07G043)
+ { .compatible = "renesas,r9a07g043", .data = &soc_rz_g2ul },
+#endif
#if defined(CONFIG_ARCH_R9A07G044)
{ .compatible = "renesas,r9a07g044", .data = &soc_rz_g2l },
#endif
@@ -378,6 +398,7 @@ static const struct renesas_id id_prr __initconst = {
static const struct of_device_id renesas_ids[] __initconst = {
{ .compatible = "renesas,bsid", .data = &id_bsid },
+ { .compatible = "renesas,r9a07g043-sysc", .data = &id_rzg2l },
{ .compatible = "renesas,r9a07g044-sysc", .data = &id_rzg2l },
{ .compatible = "renesas,r9a07g054-sysc", .data = &id_rzg2l },
{ .compatible = "renesas,prr", .data = &id_prr },
diff --git a/drivers/soc/rockchip/Kconfig b/drivers/soc/rockchip/Kconfig
index 156ac0e0c8fe..aff2f7e95237 100644
--- a/drivers/soc/rockchip/Kconfig
+++ b/drivers/soc/rockchip/Kconfig
@@ -23,23 +23,23 @@ config ROCKCHIP_IODOMAIN
voltage supplied by the regulators.
config ROCKCHIP_PM_DOMAINS
- bool "Rockchip generic power domain"
- depends on PM
- select PM_GENERIC_DOMAINS
- help
- Say y here to enable power domain support.
- In order to meet high performance and low power requirements, a power
- management unit is designed or saving power when RK3288 in low power
- mode. The RK3288 PMU is dedicated for managing the power of the whole chip.
+ bool "Rockchip generic power domain"
+ depends on PM
+ select PM_GENERIC_DOMAINS
+ help
+ Say y here to enable power domain support.
+	  In order to meet high performance and low power requirements, a power
+	  management unit is designed for saving power when the RK3288 is in low
+	  power mode. The RK3288 PMU is dedicated to managing the power of the
+	  whole chip.
- If unsure, say N.
+ If unsure, say N.
config ROCKCHIP_DTPM
tristate "Rockchip DTPM hierarchy"
depends on DTPM && m
help
- Describe the hierarchy for the Dynamic Thermal Power
- Management tree on this platform. That will create all the
- power capping capable devices.
+ Describe the hierarchy for the Dynamic Thermal Power Management tree
+ on this platform. That will create all the power capping capable
+ devices.
endif
diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c
index 494cf2b5bf7b..384461b70684 100644
--- a/drivers/soc/rockchip/grf.c
+++ b/drivers/soc/rockchip/grf.c
@@ -108,6 +108,20 @@ static const struct rockchip_grf_info rk3399_grf __initconst = {
.num_values = ARRAY_SIZE(rk3399_defaults),
};
+#define RK3566_GRF_USB3OTG0_CON1 0x0104
+
+static const struct rockchip_grf_value rk3566_defaults[] __initconst = {
+ { "usb3otg port switch", RK3566_GRF_USB3OTG0_CON1, HIWORD_UPDATE(0, 1, 12) },
+ { "usb3otg clock switch", RK3566_GRF_USB3OTG0_CON1, HIWORD_UPDATE(1, 1, 7) },
+ { "usb3otg disable usb3", RK3566_GRF_USB3OTG0_CON1, HIWORD_UPDATE(1, 1, 0) },
+};
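+
+/*
+ * Illustrative: HIWORD_UPDATE(val, mask, shift) is the usual Rockchip
+ * write-mask encoding, roughly (val << shift) | (mask << (shift + 16)).
+ * The upper halfword selects which bits of the lower halfword a write
+ * may change, so HIWORD_UPDATE(1, 1, 7) sets bit 7 and leaves every
+ * other bit of the register untouched.
+ */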
+
+static const struct rockchip_grf_info rk3566_pipegrf __initconst = {
+ .values = rk3566_defaults,
+ .num_values = ARRAY_SIZE(rk3566_defaults),
+};
+
static const struct of_device_id rockchip_grf_dt_match[] __initconst = {
{
.compatible = "rockchip,rk3036-grf",
@@ -130,6 +144,9 @@ static const struct of_device_id rockchip_grf_dt_match[] __initconst = {
}, {
.compatible = "rockchip,rk3399-grf",
.data = (void *)&rk3399_grf,
+ }, {
+ .compatible = "rockchip,rk3566-pipe-grf",
+ .data = (void *)&rk3566_pipegrf,
},
{ /* sentinel */ },
};
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index b1cf7d29dafd..89795abac951 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -283,7 +283,7 @@ static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
regmap_update_bits(pmu->regmap, pmu->info->req_offset,
pd_info->req_mask, idle ? -1U : 0);
- dsb(sy);
+ wmb();
	/* Wait until idle_ack = 1 */
target_ack = idle ? pd_info->ack_mask : 0;
@@ -390,7 +390,7 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
regmap_update_bits(pmu->regmap, pmu->info->pwr_offset,
pd->info->pwr_mask, on ? 0 : -1U);
- dsb(sy);
+ wmb();
if (readx_poll_timeout_atomic(rockchip_pmu_domain_is_on, pd, is_on,
is_on == on, 0, 10000)) {
@@ -1186,9 +1186,9 @@ static struct platform_driver rockchip_pm_domain_driver = {
.name = "rockchip-pm-domain",
.of_match_table = rockchip_pm_domain_dt_match,
/*
- * We can't forcibly eject devices form power domain,
- * so we can't really remove power domains once they
- * were added.
+ * We can't forcibly eject devices from the power
+ * domain, so we can't really remove power domains
+ * once they were added.
*/
.suppress_bind_attrs = true,
},
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index 8b53ed1cc67e..5725c8ef0406 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -146,6 +146,7 @@ config SOC_TEGRA_PMC
select GENERIC_PINCONF
select PM_OPP
select PM_GENERIC_DOMAINS
+ select REGMAP
config SOC_TEGRA_POWERGATE_BPMP
def_bool y
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index aa94fda282f4..b0a8405dbdb1 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2013-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2013-2022, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/clk.h>
@@ -162,7 +162,7 @@ static const struct nvmem_cell_info tegra_fuse_cells[] = {
.bit_offset = 0,
.nbits = 32,
}, {
- .name = "gcplex-config-fuse",
+ .name = "gpu-gcplex-config-fuse",
.offset = 0x1c8,
.bytes = 4,
.bit_offset = 0,
@@ -186,13 +186,13 @@ static const struct nvmem_cell_info tegra_fuse_cells[] = {
.bit_offset = 0,
.nbits = 32,
}, {
- .name = "pdi0",
+ .name = "gpu-pdi0",
.offset = 0x300,
.bytes = 4,
.bit_offset = 0,
.nbits = 32,
}, {
- .name = "pdi1",
+ .name = "gpu-pdi1",
.offset = 0x304,
.bytes = 4,
.bit_offset = 0,
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
index b071d433d74f..f01d8a2547b6 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2013-2022, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/device.h>
@@ -344,6 +344,21 @@ static const struct nvmem_cell_lookup tegra194_fuse_lookups[] = {
.cell_name = "xusb-pad-calibration-ext",
.dev_id = "3520000.padctl",
.con_id = "calibration-ext",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "gpu-gcplex-config-fuse",
+ .dev_id = "17000000.gpu",
+ .con_id = "gcplex-config-fuse",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "gpu-pdi0",
+ .dev_id = "17000000.gpu",
+ .con_id = "pdi0",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "gpu-pdi1",
+ .dev_id = "17000000.gpu",
+ .con_id = "pdi1",
},
};
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index fdf508e03400..c77ecf61818b 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -394,6 +394,8 @@ struct tegra_pmc_soc {
* @domain: IRQ domain provided by the PMC
* @irq: chip implementation for the IRQ domain
* @clk_nb: pclk clock changes handler
+ * @core_domain_state_synced: flag marking the core domain's state as synced
+ * @core_domain_registered: flag marking the core domain as registered
*/
struct tegra_pmc {
struct device *dev;
@@ -3766,7 +3768,7 @@ static const struct tegra_pmc_regs tegra234_pmc_regs = {
};
static const char * const tegra234_reset_sources[] = {
- "SYS_RESET_N",
+ "SYS_RESET_N", /* 0x0 */
"AOWDT",
"BCCPLEXWDT",
"BPMPWDT",
@@ -3774,19 +3776,36 @@ static const char * const tegra234_reset_sources[] = {
"SPEWDT",
"APEWDT",
"LCCPLEXWDT",
- "SENSOR",
- "AOTAG",
- "VFSENSOR",
+ "SENSOR", /* 0x8 */
+ NULL,
+ NULL,
"MAINSWRST",
"SC7",
"HSM",
- "CSITE",
+ NULL,
"RCEWDT",
- "PVA0WDT",
- "PVA1WDT",
- "L1A_ASYNC",
+ NULL, /* 0x10 */
+ NULL,
+ NULL,
"BPMPBOOT",
"FUSECRC",
+ "DCEWDT",
+ "PSCWDT",
+ "PSC",
+ "CSITE_SW", /* 0x18 */
+ "POD",
+ "SCPM",
+ "VREFRO_POWERBAD",
+ "VMON",
+ "FMON",
+ "FSI_R5WDT",
+ "FSI_THERM",
+ "FSI_R52C0WDT", /* 0x20 */
+ "FSI_R52C1WDT",
+ "FSI_R52C2WDT",
+ "FSI_R52C3WDT",
+ "FSI_FMON",
+ "FSI_VMON", /* 0x25 */
};
static const struct tegra_wake_event tegra234_wake_events[] = {
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index 700d8eecd8c4..d756591de973 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -415,9 +415,8 @@ static int of_channel_match_helper(struct device_node *np, const char *name,
void *knav_dma_open_channel(struct device *dev, const char *name,
struct knav_dma_cfg *config)
{
- struct knav_dma_chan *chan;
- struct knav_dma_device *dma;
- bool found = false;
+ struct knav_dma_device *dma = NULL, *iter1;
+ struct knav_dma_chan *chan = NULL, *iter2;
int chan_num = -1;
const char *instance;
@@ -444,33 +443,32 @@ void *knav_dma_open_channel(struct device *dev, const char *name,
}
/* Look for correct dma instance */
- list_for_each_entry(dma, &kdev->list, list) {
- if (!strcmp(dma->name, instance)) {
- found = true;
+ list_for_each_entry(iter1, &kdev->list, list) {
+ if (!strcmp(iter1->name, instance)) {
+ dma = iter1;
break;
}
}
- if (!found) {
+ if (!dma) {
dev_err(kdev->dev, "No DMA instance with name %s\n", instance);
return (void *)-EINVAL;
}
/* Look for correct dma channel from dma instance */
- found = false;
- list_for_each_entry(chan, &dma->chan_list, list) {
+ list_for_each_entry(iter2, &dma->chan_list, list) {
if (config->direction == DMA_MEM_TO_DEV) {
- if (chan->channel == chan_num) {
- found = true;
+ if (iter2->channel == chan_num) {
+ chan = iter2;
break;
}
} else {
- if (chan->flow == chan_num) {
- found = true;
+ if (iter2->flow == chan_num) {
+ chan = iter2;
break;
}
}
}
- if (!found) {
+ if (!chan) {
dev_err(kdev->dev, "channel %d is not in DMA %s\n",
chan_num, instance);
return (void *)-EINVAL;
@@ -747,9 +745,8 @@ static int knav_dma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&kdev->list);
pm_runtime_enable(kdev->dev);
- ret = pm_runtime_get_sync(kdev->dev);
+ ret = pm_runtime_resume_and_get(kdev->dev);
if (ret < 0) {
- pm_runtime_put_noidle(kdev->dev);
dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret);
goto err_pm_disable;
}
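
pm_runtime_resume_and_get(), used here and in the conversions below, folds
the usual error handling into one helper: on failure it drops the usage
count that pm_runtime_get_sync() leaves raised. A sketch of the equivalence,
for a generic struct device *dev:

    /* open-coded form */
    ret = pm_runtime_get_sync(dev);
    if (ret < 0)
        pm_runtime_put_noidle(dev);

    /* equivalent helper; drops the usage count itself on failure */
    ret = pm_runtime_resume_and_get(dev);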
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index 2ac3856b8d42..92af7d1b6f5b 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -758,10 +758,9 @@ void *knav_pool_create(const char *name,
int num_desc, int region_id)
{
struct knav_region *reg_itr, *region = NULL;
- struct knav_pool *pool, *pi;
+ struct knav_pool *pool, *pi = NULL, *iter;
struct list_head *node;
unsigned last_offset;
- bool slot_found;
int ret;
if (!kdev)
@@ -790,7 +789,7 @@ void *knav_pool_create(const char *name,
}
pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
- if (IS_ERR_OR_NULL(pool->queue)) {
+ if (IS_ERR(pool->queue)) {
dev_err(kdev->dev,
"failed to open queue for pool(%s), error %ld\n",
name, PTR_ERR(pool->queue));
@@ -816,18 +815,17 @@ void *knav_pool_create(const char *name,
* the request
*/
last_offset = 0;
- slot_found = false;
node = &region->pools;
- list_for_each_entry(pi, &region->pools, region_inst) {
- if ((pi->region_offset - last_offset) >= num_desc) {
- slot_found = true;
+ list_for_each_entry(iter, &region->pools, region_inst) {
+ if ((iter->region_offset - last_offset) >= num_desc) {
+ pi = iter;
break;
}
- last_offset = pi->region_offset + pi->num_desc;
+ last_offset = iter->region_offset + iter->num_desc;
}
- node = &pi->region_inst;
- if (slot_found) {
+ if (pi) {
+ node = &pi->region_inst;
pool->region = region;
pool->num_desc = num_desc;
pool->region_offset = last_offset;
@@ -1785,9 +1783,8 @@ static int knav_queue_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&kdev->pdsps);
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
dev_err(dev, "Failed to enable QMSS\n");
return ret;
}
diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c
index f32e1cbbe8c5..913b964374a4 100644
--- a/drivers/soc/ti/omap_prm.c
+++ b/drivers/soc/ti/omap_prm.c
@@ -941,23 +941,20 @@ static int omap_prm_probe(struct platform_device *pdev)
struct resource *res;
const struct omap_prm_data *data;
struct omap_prm *prm;
- const struct of_device_id *match;
int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- match = of_match_device(omap_prm_id_table, &pdev->dev);
- if (!match)
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
return -ENOTSUPP;
prm = devm_kzalloc(&pdev->dev, sizeof(*prm), GFP_KERNEL);
if (!prm)
return -ENOMEM;
- data = match->data;
-
while (data->base != res->start) {
if (!data->base)
return -EINVAL;
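
of_device_get_match_data() folds the of_match_device() lookup and the ->data dereference into a single call, so the local struct of_device_id pointer can go away. A minimal sketch with a hypothetical foo driver:

	static int foo_probe(struct platform_device *pdev)
	{
		const struct foo_data *data;

		/* looks pdev->dev up in the driver's OF match table */
		data = of_device_get_match_data(&pdev->dev);
		if (!data)
			return -ENOTSUPP;

		/* ... use data->base etc. as before ... */
		return 0;
	}
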
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index 7bab4bbaf02d..ce09c42eaed2 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -555,11 +555,9 @@ static int am33xx_pm_probe(struct platform_device *pdev)
#endif /* CONFIG_SUSPEND */
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
goto err_pm_runtime_disable;
- }
ret = pm_ops->init(am33xx_do_sram_idle);
if (ret) {
diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c
index b36779309e49..0e4ba0f89533 100644
--- a/drivers/soc/ti/pruss.c
+++ b/drivers/soc/ti/pruss.c
@@ -279,10 +279,9 @@ static int pruss_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pruss);
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "couldn't enable module\n");
- pm_runtime_put_noidle(dev);
goto rpm_disable;
}
diff --git a/drivers/soc/ti/ti_sci_pm_domains.c b/drivers/soc/ti/ti_sci_pm_domains.c
index 8afb3f45d263..a33ec7eaf23d 100644
--- a/drivers/soc/ti/ti_sci_pm_domains.c
+++ b/drivers/soc/ti/ti_sci_pm_domains.c
@@ -183,6 +183,8 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev)
devm_kcalloc(dev, max_id + 1,
sizeof(*pd_provider->data.domains),
GFP_KERNEL);
+ if (!pd_provider->data.domains)
+ return -ENOMEM;
pd_provider->data.num_domains = max_id + 1;
pd_provider->data.xlate = ti_sci_pd_xlate;
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index 2f03ced0f411..0076d467ff6b 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -7,7 +7,9 @@
* Dave Gerlach <d-gerlach@ti.com>
*/
+#include <linux/debugfs.h>
#include <linux/err.h>
+#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
@@ -40,12 +42,30 @@
#define M3_FW_VERSION_MASK 0xffff
#define M3_WAKE_SRC_MASK 0xff
+#define IPC_MEM_TYPE_SHIFT (0x0)
+#define IPC_MEM_TYPE_MASK (0x7 << 0)
+#define IPC_VTT_STAT_SHIFT (0x3)
+#define IPC_VTT_STAT_MASK (0x1 << 3)
+#define IPC_VTT_GPIO_PIN_SHIFT (0x4)
+#define IPC_VTT_GPIO_PIN_MASK (0x3f << 4)
+#define IPC_IO_ISOLATION_STAT_SHIFT (10)
+#define IPC_IO_ISOLATION_STAT_MASK (0x1 << 10)
+
+#define IPC_DBG_HALT_SHIFT (11)
+#define IPC_DBG_HALT_MASK (0x1 << 11)
+
#define M3_STATE_UNKNOWN 0
#define M3_STATE_RESET 1
#define M3_STATE_INITED 2
#define M3_STATE_MSG_FOR_LP 3
#define M3_STATE_MSG_FOR_RESET 4
+#define WKUP_M3_SD_FW_MAGIC 0x570C
+
+#define WKUP_M3_DMEM_START 0x80000
+#define WKUP_M3_AUXDATA_OFFSET 0x1000
+#define WKUP_M3_AUXDATA_SIZE 0xFF
+
static struct wkup_m3_ipc *m3_ipc_state;
static const struct wkup_m3_wakeup_src wakeups[] = {
@@ -66,6 +86,148 @@ static const struct wkup_m3_wakeup_src wakeups[] = {
{.irq_nr = 0, .src = "Unknown"},
};
+/**
+ * wkup_m3_copy_aux_data - Copy auxiliary data to special region of m3 dmem
+ * @m3_ipc: pointer to the wkup_m3_ipc context
+ * @data: pointer to the data
+ * @sz: size of data to copy (limit 256 bytes)
+ *
+ * Copies any additional blob of data to the wkup_m3 dmem to be used by the
+ * firmware
+ */
+static unsigned long wkup_m3_copy_aux_data(struct wkup_m3_ipc *m3_ipc,
+ const void *data, int sz)
+{
+ unsigned long aux_data_dev_addr;
+ void *aux_data_addr;
+
+ aux_data_dev_addr = WKUP_M3_DMEM_START + WKUP_M3_AUXDATA_OFFSET;
+ aux_data_addr = rproc_da_to_va(m3_ipc->rproc,
+ aux_data_dev_addr,
+ WKUP_M3_AUXDATA_SIZE,
+ NULL);
+ memcpy(aux_data_addr, data, sz);
+
+ return WKUP_M3_AUXDATA_OFFSET;
+}
+
+static void wkup_m3_scale_data_fw_cb(const struct firmware *fw, void *context)
+{
+ unsigned long val, aux_base;
+ struct wkup_m3_scale_data_header hdr;
+ struct wkup_m3_ipc *m3_ipc = context;
+ struct device *dev = m3_ipc->dev;
+
+ if (!fw) {
+ dev_err(dev, "Voltage scale fw name given but file missing.\n");
+ return;
+ }
+
+ memcpy(&hdr, fw->data, sizeof(hdr));
+
+ if (hdr.magic != WKUP_M3_SD_FW_MAGIC) {
+ dev_err(dev, "PM: Voltage Scale Data binary does not appear valid.\n");
+ goto release_sd_fw;
+ }
+
+ aux_base = wkup_m3_copy_aux_data(m3_ipc, fw->data + sizeof(hdr),
+ fw->size - sizeof(hdr));
+
+ val = (aux_base + hdr.sleep_offset);
+ val |= ((aux_base + hdr.wake_offset) << 16);
+
+ m3_ipc->volt_scale_offsets = val;
+
+release_sd_fw:
+ release_firmware(fw);
+}
+
+static int wkup_m3_init_scale_data(struct wkup_m3_ipc *m3_ipc,
+ struct device *dev)
+{
+ int ret = 0;
+
+ /*
+	 * If no name is provided, the user has already been warned; PM
+	 * will still work, so return 0.
+ */
+
+ if (!m3_ipc->sd_fw_name)
+ return ret;
+
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
+ m3_ipc->sd_fw_name, dev, GFP_ATOMIC,
+ m3_ipc, wkup_m3_scale_data_fw_cb);
+
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void wkup_m3_set_halt_late(bool enabled)
+{
+ if (enabled)
+ m3_ipc_state->halt = (1 << IPC_DBG_HALT_SHIFT);
+ else
+ m3_ipc_state->halt = 0;
+}
+
+static int option_get(void *data, u64 *val)
+{
+ u32 *option = data;
+
+ *val = *option;
+
+ return 0;
+}
+
+static int option_set(void *data, u64 val)
+{
+ u32 *option = data;
+
+ *option = val;
+
+	if (option == &m3_ipc_state->halt)
+		wkup_m3_set_halt_late(!!val);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(wkup_m3_ipc_option_fops, option_get, option_set,
+ "%llu\n");
+
+static int wkup_m3_ipc_dbg_init(struct wkup_m3_ipc *m3_ipc)
+{
+ m3_ipc->dbg_path = debugfs_create_dir("wkup_m3_ipc", NULL);
+
+	/* debugfs_create_dir() returns an ERR_PTR(), never NULL, on failure */
+	if (IS_ERR(m3_ipc->dbg_path))
+		return PTR_ERR(m3_ipc->dbg_path);
+
+ (void)debugfs_create_file("enable_late_halt", 0644,
+ m3_ipc->dbg_path,
+ &m3_ipc->halt,
+ &wkup_m3_ipc_option_fops);
+
+ return 0;
+}
+
+static inline void wkup_m3_ipc_dbg_destroy(struct wkup_m3_ipc *m3_ipc)
+{
+ debugfs_remove_recursive(m3_ipc->dbg_path);
+}
+#else
+static inline int wkup_m3_ipc_dbg_init(struct wkup_m3_ipc *m3_ipc)
+{
+ return 0;
+}
+
+static inline void wkup_m3_ipc_dbg_destroy(struct wkup_m3_ipc *m3_ipc)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
static void am33xx_txev_eoi(struct wkup_m3_ipc *m3_ipc)
{
writel(AM33XX_M3_TXEV_ACK,
@@ -130,6 +292,7 @@ static irqreturn_t wkup_m3_txev_handler(int irq, void *ipc_data)
}
m3_ipc->state = M3_STATE_INITED;
+ wkup_m3_init_scale_data(m3_ipc, dev);
complete(&m3_ipc->sync_complete);
break;
case M3_STATE_MSG_FOR_RESET:
@@ -215,6 +378,17 @@ static int wkup_m3_is_available(struct wkup_m3_ipc *m3_ipc)
(m3_ipc->state != M3_STATE_UNKNOWN));
}
+static void wkup_m3_set_vtt_gpio(struct wkup_m3_ipc *m3_ipc, int gpio)
+{
+ m3_ipc->vtt_conf = (1 << IPC_VTT_STAT_SHIFT) |
+ (gpio << IPC_VTT_GPIO_PIN_SHIFT);
+}
+
+static void wkup_m3_set_io_isolation(struct wkup_m3_ipc *m3_ipc)
+{
+ m3_ipc->isolation_conf = (1 << IPC_IO_ISOLATION_STAT_SHIFT);
+}
+
/* Public functions */
/**
* wkup_m3_set_mem_type - Pass wkup_m3 which type of memory is in use
@@ -280,12 +454,15 @@ static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
switch (state) {
case WKUP_M3_DEEPSLEEP:
m3_power_state = IPC_CMD_DS0;
+ wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->volt_scale_offsets, 5);
break;
case WKUP_M3_STANDBY:
m3_power_state = IPC_CMD_STANDBY;
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
break;
case WKUP_M3_IDLE:
m3_power_state = IPC_CMD_IDLE;
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
break;
default:
return 1;
@@ -294,11 +471,13 @@ static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
/* Program each required IPC register then write defaults to others */
wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->resume_addr, 0);
wkup_m3_ctrl_ipc_write(m3_ipc, m3_power_state, 1);
- wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->mem_type, 4);
+ wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->mem_type |
+ m3_ipc->vtt_conf |
+ m3_ipc->isolation_conf |
+ m3_ipc->halt, 4);
wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);
wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 3);
- wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 6);
wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 7);
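
With the additions above, IPC register 4 now carries the memory type together with the optional VTT-toggle, IO-isolation and debug-halt bits in a single word. An illustrative composition using the IPC_*_SHIFT definitions from this patch (mem_type and vtt_gpio_pin are placeholder values):

	u32 word = 0;

	word |= mem_type << IPC_MEM_TYPE_SHIFT;		/* bits [2:0] */
	word |= 1 << IPC_VTT_STAT_SHIFT;		/* bit 3: VTT toggle */
	word |= vtt_gpio_pin << IPC_VTT_GPIO_PIN_SHIFT;	/* bits [9:4] */
	word |= 1 << IPC_IO_ISOLATION_STAT_SHIFT;	/* bit 10 */
	word |= 1 << IPC_DBG_HALT_SHIFT;		/* bit 11: halt late */
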
@@ -433,12 +612,13 @@ static int wkup_m3_rproc_boot_thread(void *arg)
static int wkup_m3_ipc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- int irq, ret;
+ int irq, ret, temp;
phandle rproc_phandle;
struct rproc *m3_rproc;
struct resource *res;
struct task_struct *task;
struct wkup_m3_ipc *m3_ipc;
+ struct device_node *np = dev->of_node;
m3_ipc = devm_kzalloc(dev, sizeof(*m3_ipc), GFP_KERNEL);
if (!m3_ipc)
@@ -450,10 +630,8 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
return PTR_ERR(m3_ipc->ipc_mem_base);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no irq resource\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(dev, irq, wkup_m3_txev_handler,
0, "wkup_m3_txev", m3_ipc);
@@ -496,6 +674,22 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
m3_ipc->ops = &ipc_ops;
+ if (!of_property_read_u32(np, "ti,vtt-gpio-pin", &temp)) {
+ if (temp >= 0 && temp <= 31)
+ wkup_m3_set_vtt_gpio(m3_ipc, temp);
+ else
+ dev_warn(dev, "Invalid VTT GPIO(%d) pin\n", temp);
+ }
+
+ if (of_find_property(np, "ti,set-io-isolation", NULL))
+ wkup_m3_set_io_isolation(m3_ipc);
+
+ ret = of_property_read_string(np, "firmware-name",
+ &m3_ipc->sd_fw_name);
+	if (ret)
+		dev_dbg(dev, "Voltage scaling data blob not provided from DT.\n");
+
/*
* Wait for firmware loading completion in a thread so we
* can boot the wkup_m3 as soon as it's ready without holding
@@ -510,6 +704,8 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
goto err_put_rproc;
}
+ wkup_m3_ipc_dbg_init(m3_ipc);
+
return 0;
err_put_rproc:
@@ -521,6 +717,8 @@ err_free_mbox:
static int wkup_m3_ipc_remove(struct platform_device *pdev)
{
+ wkup_m3_ipc_dbg_destroy(m3_ipc_state);
+
mbox_free_channel(m3_ipc_state->mbox);
rproc_shutdown(m3_ipc_state->rproc);
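
The voltage-scale blob is expected to begin with a small header; its definition lives in the wkup_m3_ipc header rather than in this file, but judging from the callback above a plausible shape (an assumption, not quoted from the patch) is:

	struct wkup_m3_scale_data_header {
		u16 magic;		/* must equal WKUP_M3_SD_FW_MAGIC */
		u16 sleep_offset;	/* data applied when entering low power */
		u16 wake_offset;	/* data applied when resuming */
	} __packed;

	/* volt_scale_offsets packs both: sleep in bits [15:0], wake in [31:16] */

The new debugfs knob is toggled at runtime with, for example, echo 1 > /sys/kernel/debug/wkup_m3_ipc/enable_late_halt.
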
diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig
index e99d840c2511..73a147202e88 100644
--- a/drivers/tee/Kconfig
+++ b/drivers/tee/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
# Generic Trusted Execution Environment Configuration
-config TEE
+menuconfig TEE
tristate "Trusted Execution Environment support"
depends on HAVE_ARM_SMCCC || COMPILE_TEST || CPU_SUP_AMD
select CRYPTO
@@ -13,10 +13,7 @@ config TEE
if TEE
-menu "TEE drivers"
-
source "drivers/tee/optee/Kconfig"
source "drivers/tee/amdtee/Kconfig"
-endmenu
endif
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index bd49ec934060..6554e06e053e 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -11,6 +11,34 @@
#include <linux/types.h>
#include "optee_private.h"
+#define MAX_ARG_PARAM_COUNT 6
+
+/*
+ * How much memory we allocate for each entry. This doesn't have to be a
+ * single page, but it makes sense to keep it as a multiple of
+ * the page size.
+ */
+#define SHM_ENTRY_SIZE PAGE_SIZE
+
+/*
+ * We need to have a compile time constant to be able to determine the
+ * maximum needed size of the bit field.
+ */
+#define MIN_ARG_SIZE OPTEE_MSG_GET_ARG_SIZE(MAX_ARG_PARAM_COUNT)
+#define MAX_ARG_COUNT_PER_ENTRY (SHM_ENTRY_SIZE / MIN_ARG_SIZE)
+
+/*
+ * Shared memory for argument structs is cached here. The number of
+ * argument structs that can fit is determined at runtime depending on the
+ * needed RPC parameter count reported by secure world
+ * (optee->rpc_param_count).
+ */
+struct optee_shm_arg_entry {
+ struct list_head list_node;
+ struct tee_shm *shm;
+ DECLARE_BITMAP(map, MAX_ARG_COUNT_PER_ENTRY);
+};
+
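
As a worked sizing example, assuming 4 KiB pages and the optee_msg.h layout in which struct optee_msg_arg and struct optee_msg_param are 32 bytes each:

	/*
	 * MIN_ARG_SIZE            = OPTEE_MSG_GET_ARG_SIZE(6)
	 *                         = 32 + 6 * 32 = 224 bytes
	 * MAX_ARG_COUNT_PER_ENTRY = 4096 / 224  = 18 bitmap slots
	 *
	 * At runtime args_per_entry shrinks when an RPC argument struct
	 * is appended: with rpc_param_count == 2, sz = 224 + 96 = 320
	 * bytes, i.e. 12 argument structs per entry.
	 */
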
void optee_cq_wait_init(struct optee_call_queue *cq,
struct optee_call_waiter *w)
{
@@ -104,37 +132,149 @@ static struct optee_session *find_session(struct optee_context_data *ctxdata,
return NULL;
}
-struct tee_shm *optee_get_msg_arg(struct tee_context *ctx, size_t num_params,
- struct optee_msg_arg **msg_arg)
+void optee_shm_arg_cache_init(struct optee *optee, u32 flags)
+{
+ INIT_LIST_HEAD(&optee->shm_arg_cache.shm_args);
+ mutex_init(&optee->shm_arg_cache.mutex);
+ optee->shm_arg_cache.flags = flags;
+}
+
+void optee_shm_arg_cache_uninit(struct optee *optee)
+{
+ struct list_head *head = &optee->shm_arg_cache.shm_args;
+ struct optee_shm_arg_entry *entry;
+
+ mutex_destroy(&optee->shm_arg_cache.mutex);
+ while (!list_empty(head)) {
+ entry = list_first_entry(head, struct optee_shm_arg_entry,
+ list_node);
+ list_del(&entry->list_node);
+ if (find_first_bit(entry->map, MAX_ARG_COUNT_PER_ENTRY) !=
+ MAX_ARG_COUNT_PER_ENTRY) {
+ pr_err("Freeing non-free entry\n");
+ }
+ tee_shm_free(entry->shm);
+ kfree(entry);
+ }
+}
+
+size_t optee_msg_arg_size(size_t rpc_param_count)
+{
+ size_t sz = OPTEE_MSG_GET_ARG_SIZE(MAX_ARG_PARAM_COUNT);
+
+ if (rpc_param_count)
+ sz += OPTEE_MSG_GET_ARG_SIZE(rpc_param_count);
+
+ return sz;
+}
+
+/**
+ * optee_get_msg_arg() - Provide shared memory for argument struct
+ * @ctx: Caller TEE context
+ * @num_params: Number of parameters to store
+ * @entry_ret: Entry pointer, needed when freeing the buffer
+ * @shm_ret: Shared memory buffer
+ * @offs_ret: Offset of argument struct in shared memory buffer
+ *
+ * @returns a pointer to the argument struct in memory, else an ERR_PTR
+ */
+struct optee_msg_arg *optee_get_msg_arg(struct tee_context *ctx,
+ size_t num_params,
+ struct optee_shm_arg_entry **entry_ret,
+ struct tee_shm **shm_ret,
+ u_int *offs_ret)
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
- size_t sz = OPTEE_MSG_GET_ARG_SIZE(num_params);
- struct tee_shm *shm;
+ size_t sz = optee_msg_arg_size(optee->rpc_param_count);
+ struct optee_shm_arg_entry *entry;
struct optee_msg_arg *ma;
+ size_t args_per_entry;
+ u_long bit;
+ u_int offs;
+ void *res;
+
+ if (num_params > MAX_ARG_PARAM_COUNT)
+ return ERR_PTR(-EINVAL);
+
+ if (optee->shm_arg_cache.flags & OPTEE_SHM_ARG_SHARED)
+ args_per_entry = SHM_ENTRY_SIZE / sz;
+ else
+ args_per_entry = 1;
+
+ mutex_lock(&optee->shm_arg_cache.mutex);
+ list_for_each_entry(entry, &optee->shm_arg_cache.shm_args, list_node) {
+ bit = find_first_zero_bit(entry->map, MAX_ARG_COUNT_PER_ENTRY);
+ if (bit < args_per_entry)
+ goto have_entry;
+ }
/*
- * rpc_arg_count is set to the number of allocated parameters in
- * the RPC argument struct if a second MSG arg struct is expected.
- * The second arg struct will then be used for RPC.
+	 * No entry was found, let's allocate a new one.
*/
- if (optee->rpc_arg_count)
- sz += OPTEE_MSG_GET_ARG_SIZE(optee->rpc_arg_count);
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ res = ERR_PTR(-ENOMEM);
+ goto out;
+ }
- shm = tee_shm_alloc_priv_buf(ctx, sz);
- if (IS_ERR(shm))
- return shm;
+ if (optee->shm_arg_cache.flags & OPTEE_SHM_ARG_ALLOC_PRIV)
+ res = tee_shm_alloc_priv_buf(ctx, SHM_ENTRY_SIZE);
+ else
+ res = tee_shm_alloc_kernel_buf(ctx, SHM_ENTRY_SIZE);
- ma = tee_shm_get_va(shm, 0);
- if (IS_ERR(ma)) {
- tee_shm_free(shm);
- return (void *)ma;
+ if (IS_ERR(res)) {
+ kfree(entry);
+ goto out;
}
-
- memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
+ entry->shm = res;
+ list_add(&entry->list_node, &optee->shm_arg_cache.shm_args);
+ bit = 0;
+
+have_entry:
+ offs = bit * sz;
+ res = tee_shm_get_va(entry->shm, offs);
+ if (IS_ERR(res))
+ goto out;
+ ma = res;
+ set_bit(bit, entry->map);
+ memset(ma, 0, sz);
ma->num_params = num_params;
- *msg_arg = ma;
+ *entry_ret = entry;
+ *shm_ret = entry->shm;
+ *offs_ret = offs;
+out:
+ mutex_unlock(&optee->shm_arg_cache.mutex);
+ return res;
+}
+
+/**
+ * optee_free_msg_arg() - Free previously obtained shared memory
+ * @ctx: Caller TEE context
+ * @entry: Pointer returned when the shared memory was obtained
+ * @offs: Offset of shared memory buffer to free
+ *
+ * This function frees the shared memory obtained with optee_get_msg_arg().
+ */
+void optee_free_msg_arg(struct tee_context *ctx,
+ struct optee_shm_arg_entry *entry, u_int offs)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ size_t sz = optee_msg_arg_size(optee->rpc_param_count);
+ u_long bit;
- return shm;
+ if (offs > SHM_ENTRY_SIZE || offs % sz) {
+ pr_err("Invalid offs %u\n", offs);
+ return;
+ }
+ bit = offs / sz;
+
+ mutex_lock(&optee->shm_arg_cache.mutex);
+
+ if (!test_bit(bit, entry->map))
+ pr_err("Bit pos %lu is already free\n", bit);
+ clear_bit(bit, entry->map);
+
+ mutex_unlock(&optee->shm_arg_cache.mutex);
}
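
Callers of the new API keep the returned entry and offset for the matching free. The smallest complete sequence, mirroring the conversions below (ctx and optee as in those callers):

	struct optee_shm_arg_entry *entry;
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm;
	u_int offs;

	msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
	if (IS_ERR(msg_arg))
		return PTR_ERR(msg_arg);

	msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;	/* fill the request */
	optee->ops->do_call_with_arg(ctx, shm, offs);	/* enter secure world */
	optee_free_msg_arg(ctx, entry, offs);		/* release the slot */
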
int optee_open_session(struct tee_context *ctx,
@@ -143,16 +283,19 @@ int optee_open_session(struct tee_context *ctx,
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_context_data *ctxdata = ctx->data;
- int rc;
+ struct optee_shm_arg_entry *entry;
struct tee_shm *shm;
struct optee_msg_arg *msg_arg;
struct optee_session *sess = NULL;
uuid_t client_uuid;
+ u_int offs;
+ int rc;
/* +2 for the meta parameters added below */
- shm = optee_get_msg_arg(ctx, arg->num_params + 2, &msg_arg);
- if (IS_ERR(shm))
- return PTR_ERR(shm);
+ msg_arg = optee_get_msg_arg(ctx, arg->num_params + 2,
+ &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
msg_arg->cancel_id = arg->cancel_id;
@@ -185,7 +328,7 @@ int optee_open_session(struct tee_context *ctx,
goto out;
}
- if (optee->ops->do_call_with_arg(ctx, shm)) {
+ if (optee->ops->do_call_with_arg(ctx, shm, offs)) {
msg_arg->ret = TEEC_ERROR_COMMUNICATION;
msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
}
@@ -212,26 +355,28 @@ int optee_open_session(struct tee_context *ctx,
arg->ret_origin = msg_arg->ret_origin;
}
out:
- tee_shm_free(shm);
+ optee_free_msg_arg(ctx, entry, offs);
return rc;
}
int optee_close_session_helper(struct tee_context *ctx, u32 session)
{
- struct tee_shm *shm;
struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_shm_arg_entry *entry;
struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm;
+ u_int offs;
- shm = optee_get_msg_arg(ctx, 0, &msg_arg);
- if (IS_ERR(shm))
- return PTR_ERR(shm);
+ msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
msg_arg->session = session;
- optee->ops->do_call_with_arg(ctx, shm);
+ optee->ops->do_call_with_arg(ctx, shm, offs);
- tee_shm_free(shm);
+ optee_free_msg_arg(ctx, entry, offs);
return 0;
}
@@ -259,9 +404,11 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_context_data *ctxdata = ctx->data;
- struct tee_shm *shm;
+ struct optee_shm_arg_entry *entry;
struct optee_msg_arg *msg_arg;
struct optee_session *sess;
+ struct tee_shm *shm;
+ u_int offs;
int rc;
/* Check that the session is valid */
@@ -271,9 +418,10 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
if (!sess)
return -EINVAL;
- shm = optee_get_msg_arg(ctx, arg->num_params, &msg_arg);
- if (IS_ERR(shm))
- return PTR_ERR(shm);
+ msg_arg = optee_get_msg_arg(ctx, arg->num_params,
+ &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
msg_arg->func = arg->func;
msg_arg->session = arg->session;
@@ -284,7 +432,7 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
if (rc)
goto out;
- if (optee->ops->do_call_with_arg(ctx, shm)) {
+ if (optee->ops->do_call_with_arg(ctx, shm, offs)) {
msg_arg->ret = TEEC_ERROR_COMMUNICATION;
msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
}
@@ -298,7 +446,7 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
arg->ret = msg_arg->ret;
arg->ret_origin = msg_arg->ret_origin;
out:
- tee_shm_free(shm);
+ optee_free_msg_arg(ctx, entry, offs);
return rc;
}
@@ -306,9 +454,11 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_context_data *ctxdata = ctx->data;
- struct tee_shm *shm;
+ struct optee_shm_arg_entry *entry;
struct optee_msg_arg *msg_arg;
struct optee_session *sess;
+ struct tee_shm *shm;
+ u_int offs;
/* Check that the session is valid */
mutex_lock(&ctxdata->mutex);
@@ -317,16 +467,16 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
if (!sess)
return -EINVAL;
- shm = optee_get_msg_arg(ctx, 0, &msg_arg);
- if (IS_ERR(shm))
- return PTR_ERR(shm);
+ msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
msg_arg->session = session;
msg_arg->cancel_id = cancel_id;
- optee->ops->do_call_with_arg(ctx, shm);
+ optee->ops->do_call_with_arg(ctx, shm, offs);
- tee_shm_free(shm);
+ optee_free_msg_arg(ctx, entry, offs);
return 0;
}
@@ -362,7 +512,7 @@ int optee_check_mem_type(unsigned long start, size_t num_pages)
* Allow kernel address to register with OP-TEE as kernel
* pages are configured as normal memory only.
*/
- if (virt_addr_valid(start))
+ if (virt_addr_valid(start) || is_vmalloc_addr((void *)start))
return 0;
mmap_read_lock(mm);
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index daf947e98d14..daf07737c4fd 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -171,6 +171,7 @@ void optee_remove_common(struct optee *optee)
optee_unregister_devices();
optee_notif_uninit(optee);
+ optee_shm_arg_cache_uninit(optee);
teedev_close_context(optee->ctx);
/*
* The two devices have to be unregistered before we can free the
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
index c9b3b2cfb2b2..7ab31740cff8 100644
--- a/drivers/tee/optee/ffa_abi.c
+++ b/drivers/tee/optee/ffa_abi.c
@@ -601,6 +601,7 @@ done:
* optee_ffa_do_call_with_arg() - Do a FF-A call to enter OP-TEE in secure world
* @ctx: calling context
* @shm: shared memory holding the message to pass to secure world
+ * @offs: offset of the message in @shm
*
* Does a FF-A call to OP-TEE in secure world and handles eventual resulting
* Remote Procedure Calls (RPC) from OP-TEE.
@@ -609,24 +610,33 @@ done:
*/
static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
- struct tee_shm *shm)
+ struct tee_shm *shm, u_int offs)
{
struct ffa_send_direct_data data = {
.data0 = OPTEE_FFA_YIELDING_CALL_WITH_ARG,
.data1 = (u32)shm->sec_world_id,
.data2 = (u32)(shm->sec_world_id >> 32),
- .data3 = shm->offset,
+ .data3 = offs,
};
struct optee_msg_arg *arg;
unsigned int rpc_arg_offs;
struct optee_msg_arg *rpc_arg;
- arg = tee_shm_get_va(shm, 0);
+ /*
+ * The shared memory object has to start on a page when passed as
+ * an argument struct. This is also what the shm pool allocator
+ * returns, but check this before calling secure world to catch
+	 * possible errors early in case something changes.
+ */
+ if (shm->offset)
+ return -EINVAL;
+
+ arg = tee_shm_get_va(shm, offs);
if (IS_ERR(arg))
return PTR_ERR(arg);
rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
- rpc_arg = tee_shm_get_va(shm, rpc_arg_offs);
+ rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs);
if (IS_ERR(rpc_arg))
return PTR_ERR(rpc_arg);
@@ -678,7 +688,8 @@ static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
const struct ffa_dev_ops *ops,
- unsigned int *rpc_arg_count)
+ u32 *sec_caps,
+ unsigned int *rpc_param_count)
{
struct ffa_send_direct_data data = { OPTEE_FFA_EXCHANGE_CAPABILITIES };
int rc;
@@ -693,7 +704,8 @@ static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
return false;
}
- *rpc_arg_count = (u8)data.data1;
+ *rpc_param_count = (u8)data.data1;
+ *sec_caps = data.data2;
return true;
}
@@ -759,7 +771,7 @@ static const struct optee_ops optee_ffa_ops = {
static void optee_ffa_remove(struct ffa_device *ffa_dev)
{
- struct optee *optee = ffa_dev->dev.driver_data;
+ struct optee *optee = ffa_dev_get_drvdata(ffa_dev);
optee_remove_common(optee);
@@ -772,11 +784,13 @@ static void optee_ffa_remove(struct ffa_device *ffa_dev)
static int optee_ffa_probe(struct ffa_device *ffa_dev)
{
const struct ffa_dev_ops *ffa_ops;
- unsigned int rpc_arg_count;
+ unsigned int rpc_param_count;
struct tee_shm_pool *pool;
struct tee_device *teedev;
struct tee_context *ctx;
+ u32 arg_cache_flags = 0;
struct optee *optee;
+ u32 sec_caps;
int rc;
ffa_ops = ffa_dev_ops_get(ffa_dev);
@@ -788,8 +802,11 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
if (!optee_ffa_api_is_compatbile(ffa_dev, ffa_ops))
return -EINVAL;
- if (!optee_ffa_exchange_caps(ffa_dev, ffa_ops, &rpc_arg_count))
+ if (!optee_ffa_exchange_caps(ffa_dev, ffa_ops, &sec_caps,
+ &rpc_param_count))
return -EINVAL;
+ if (sec_caps & OPTEE_FFA_SEC_CAP_ARG_OFFSET)
+ arg_cache_flags |= OPTEE_SHM_ARG_SHARED;
optee = kzalloc(sizeof(*optee), GFP_KERNEL);
if (!optee)
@@ -805,7 +822,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
optee->ops = &optee_ffa_ops;
optee->ffa.ffa_dev = ffa_dev;
optee->ffa.ffa_ops = ffa_ops;
- optee->rpc_arg_count = rpc_arg_count;
+ optee->rpc_param_count = rpc_param_count;
teedev = tee_device_alloc(&optee_ffa_clnt_desc, NULL, optee->pool,
optee);
@@ -838,6 +855,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
mutex_init(&optee->call_queue.mutex);
INIT_LIST_HEAD(&optee->call_queue.waiters);
optee_supp_init(&optee->supp);
+ optee_shm_arg_cache_init(optee, arg_cache_flags);
ffa_dev_set_drvdata(ffa_dev, optee);
ctx = teedev_open(optee->teedev);
if (IS_ERR(ctx)) {
diff --git a/drivers/tee/optee/optee_ffa.h b/drivers/tee/optee/optee_ffa.h
index ee3a03fc392c..97266243deaa 100644
--- a/drivers/tee/optee/optee_ffa.h
+++ b/drivers/tee/optee/optee_ffa.h
@@ -81,8 +81,16 @@
* as the second MSG arg struct for
* OPTEE_FFA_YIELDING_CALL_WITH_ARG.
* Bit[31:8]: Reserved (MBZ)
- * w5-w7: Note used (MBZ)
+ * w5: Bitfield of secure world capabilities OPTEE_FFA_SEC_CAP_* below,
+ * unused bits MBZ.
+ * w6-w7: Not used (MBZ)
+ */
+/*
+ * Secure world supports giving an offset into the argument shared memory
+ * object, see also OPTEE_FFA_YIELDING_CALL_WITH_ARG
*/
+#define OPTEE_FFA_SEC_CAP_ARG_OFFSET BIT(0)
+
#define OPTEE_FFA_EXCHANGE_CAPABILITIES OPTEE_FFA_BLOCKING_CALL(2)
/*
@@ -112,6 +120,8 @@
* OPTEE_MSG_GET_ARG_SIZE(num_params) follows a struct optee_msg_arg
* for RPC, this struct has reserved space for the number of RPC
* parameters as returned by OPTEE_FFA_EXCHANGE_CAPABILITIES.
+ * MBZ unless the bit OPTEE_FFA_SEC_CAP_ARG_OFFSET is received with
+ * OPTEE_FFA_EXCHANGE_CAPABILITIES.
* w7: Not used (MBZ)
* Resume from RPC. Register usage:
* w3: Service ID, OPTEE_FFA_YIELDING_CALL_RESUME
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index e77765c78878..a33d98d17cfd 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -59,6 +59,16 @@ struct optee_notif {
u_long *bitmap;
};
+#define OPTEE_SHM_ARG_ALLOC_PRIV BIT(0)
+#define OPTEE_SHM_ARG_SHARED BIT(1)
+struct optee_shm_arg_entry;
+struct optee_shm_arg_cache {
+ u32 flags;
+ /* Serializes access to this struct */
+ struct mutex mutex;
+ struct list_head shm_args;
+};
+
/**
* struct optee_supp - supplicant synchronization struct
* @ctx the context of current connected supplicant.
@@ -121,7 +131,7 @@ struct optee;
*/
struct optee_ops {
int (*do_call_with_arg)(struct tee_context *ctx,
- struct tee_shm *shm_arg);
+ struct tee_shm *shm_arg, u_int offs);
int (*to_msg_param)(struct optee *optee,
struct optee_msg_param *msg_params,
size_t num_params, const struct tee_param *params);
@@ -143,7 +153,7 @@ struct optee_ops {
* @notif: notification synchronization struct
* @supp: supplicant synchronization struct for RPC to supplicant
* @pool: shared memory pool
- * @rpc_arg_count: If > 0 number of RPC parameters to make room for
+ * @rpc_param_count: If > 0 number of RPC parameters to make room for
 * @scan_bus_done	flag if device registration was already done.
* @scan_bus_wq workqueue to scan optee bus and register optee drivers
* @scan_bus_work workq to scan optee bus and register optee drivers
@@ -157,11 +167,12 @@ struct optee {
struct optee_smc smc;
struct optee_ffa ffa;
};
+ struct optee_shm_arg_cache shm_arg_cache;
struct optee_call_queue call_queue;
struct optee_notif notif;
struct optee_supp supp;
struct tee_shm_pool *pool;
- unsigned int rpc_arg_count;
+ unsigned int rpc_param_count;
bool scan_bus_done;
struct workqueue_struct *scan_bus_wq;
struct work_struct scan_bus_work;
@@ -273,8 +284,18 @@ void optee_cq_wait_for_completion(struct optee_call_queue *cq,
void optee_cq_wait_final(struct optee_call_queue *cq,
struct optee_call_waiter *w);
int optee_check_mem_type(unsigned long start, size_t num_pages);
-struct tee_shm *optee_get_msg_arg(struct tee_context *ctx, size_t num_params,
- struct optee_msg_arg **msg_arg);
+
+void optee_shm_arg_cache_init(struct optee *optee, u32 flags);
+void optee_shm_arg_cache_uninit(struct optee *optee);
+struct optee_msg_arg *optee_get_msg_arg(struct tee_context *ctx,
+ size_t num_params,
+ struct optee_shm_arg_entry **entry,
+ struct tee_shm **shm_ret,
+ u_int *offs);
+void optee_free_msg_arg(struct tee_context *ctx,
+ struct optee_shm_arg_entry *entry, u_int offs);
+size_t optee_msg_arg_size(size_t rpc_param_count);
+
struct tee_shm *optee_rpc_cmd_alloc_suppl(struct tee_context *ctx, size_t sz);
void optee_rpc_cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm);
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
index d44a6ae994f8..c60896cf71cb 100644
--- a/drivers/tee/optee/optee_smc.h
+++ b/drivers/tee/optee/optee_smc.h
@@ -107,14 +107,22 @@ struct optee_smc_call_get_os_revision_result {
/*
* Call with struct optee_msg_arg as argument
*
- * When calling this function normal world has a few responsibilities:
+ * When called with OPTEE_SMC_CALL_WITH_RPC_ARG or
+ * OPTEE_SMC_CALL_WITH_REGD_ARG in a0 there is one RPC struct optee_msg_arg
+ * following after the first struct optee_msg_arg. The RPC struct
+ * optee_msg_arg has reserved space for the number of RPC parameters as
+ * returned by OPTEE_SMC_EXCHANGE_CAPABILITIES.
+ *
+ * When calling these functions, normal world has a few responsibilities:
* 1. It must be able to handle eventual RPCs
* 2. Non-secure interrupts should not be masked
 * 3. If asynchronous notifications have been negotiated successfully, then
- * asynchronous notifications should be unmasked during this call.
+ * the interrupt for asynchronous notifications should be unmasked
+ * during this call.
*
- * Call register usage:
- * a0 SMC Function ID, OPTEE_SMC*CALL_WITH_ARG
+ * Call register usage, OPTEE_SMC_CALL_WITH_ARG and
+ * OPTEE_SMC_CALL_WITH_RPC_ARG:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_WITH_ARG or OPTEE_SMC_CALL_WITH_RPC_ARG
* a1 Upper 32 bits of a 64-bit physical pointer to a struct optee_msg_arg
* a2 Lower 32 bits of a 64-bit physical pointer to a struct optee_msg_arg
* a3 Cache settings, not used if physical pointer is in a predefined shared
@@ -122,6 +130,15 @@ struct optee_smc_call_get_os_revision_result {
* a4-6 Not used
* a7 Hypervisor Client ID register
*
+ * Call register usage, OPTEE_SMC_CALL_WITH_REGD_ARG:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_WITH_REGD_ARG
+ * a1 Upper 32 bits of a 64-bit shared memory cookie
+ * a2 Lower 32 bits of a 64-bit shared memory cookie
+ * a3 Offset of the struct optee_msg_arg in the shared memory with the
+ * supplied cookie
+ * a4-6 Not used
+ * a7 Hypervisor Client ID register
+ *
* Normal return register usage:
* a0 Return value, OPTEE_SMC_RETURN_*
* a1-3 Not used
@@ -154,6 +171,10 @@ struct optee_smc_call_get_os_revision_result {
#define OPTEE_SMC_FUNCID_CALL_WITH_ARG OPTEE_MSG_FUNCID_CALL_WITH_ARG
#define OPTEE_SMC_CALL_WITH_ARG \
OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_ARG)
+#define OPTEE_SMC_CALL_WITH_RPC_ARG \
+ OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_RPC_ARG)
+#define OPTEE_SMC_CALL_WITH_REGD_ARG \
+ OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_REGD_ARG)
/*
* Get Shared Memory Config
@@ -202,7 +223,11 @@ struct optee_smc_get_shm_config_result {
* a0 OPTEE_SMC_RETURN_OK
* a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
* a2 The maximum secure world notification number
- * a3-7 Preserved
+ * a3 Bit[7:0]: Number of parameters needed for RPC to be supplied
+ * as the second MSG arg struct for
+ * OPTEE_SMC_CALL_WITH_ARG
+ * Bit[31:8]: Reserved (MBZ)
+ * a4-7 Preserved
*
* Error return register usage:
* a0 OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world
@@ -227,6 +252,8 @@ struct optee_smc_get_shm_config_result {
#define OPTEE_SMC_SEC_CAP_MEMREF_NULL BIT(4)
/* Secure world supports asynchronous notification of normal world */
#define OPTEE_SMC_SEC_CAP_ASYNC_NOTIF BIT(5)
+/* Secure world supports pre-allocating RPC arg struct */
+#define OPTEE_SMC_SEC_CAP_RPC_ARG BIT(6)
#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9
#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
@@ -236,7 +263,7 @@ struct optee_smc_exchange_capabilities_result {
unsigned long status;
unsigned long capabilities;
unsigned long max_notif_value;
- unsigned long reserved0;
+ unsigned long data;
};
/*
@@ -358,6 +385,9 @@ struct optee_smc_disable_shm_cache_result {
* should be called until all pended values have been retrieved. When a
* value is retrieved, it's cleared from the record in secure world.
*
+ * It is expected that this function is called from an interrupt handler
+ * in normal world.
+ *
* Call requests usage:
* a0 SMC Function ID, OPTEE_SMC_GET_ASYNC_NOTIF_VALUE
* a1-6 Not used
@@ -390,6 +420,12 @@ struct optee_smc_disable_shm_cache_result {
#define OPTEE_SMC_GET_ASYNC_NOTIF_VALUE \
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_ASYNC_NOTIF_VALUE)
+/* See OPTEE_SMC_CALL_WITH_RPC_ARG above */
+#define OPTEE_SMC_FUNCID_CALL_WITH_RPC_ARG 18
+
+/* See OPTEE_SMC_CALL_WITH_REGD_ARG above */
+#define OPTEE_SMC_FUNCID_CALL_WITH_REGD_ARG 19
+
/*
* Resume from RPC (for example after processing a foreign interrupt)
*
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index 67b7f7d2ff27..385cb0aee610 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -437,6 +437,7 @@ static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
struct optee_msg_arg *msg_arg;
struct tee_shm *shm_arg;
u64 *pages_list;
+ size_t sz;
int rc;
if (!num_pages)
@@ -450,15 +451,30 @@ static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
if (!pages_list)
return -ENOMEM;
- shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
+ /*
+	 * We're about to register shared memory, so we can't use
+	 * registered shared memory for this request without hitting a
+	 * catch-22.
+	 *
+	 * So in this case we'll have to do the good old temporary private
+ * allocation instead of using optee_get_msg_arg().
+ */
+ sz = optee_msg_arg_size(optee->rpc_param_count);
+ shm_arg = tee_shm_alloc_priv_buf(ctx, sz);
if (IS_ERR(shm_arg)) {
rc = PTR_ERR(shm_arg);
goto out;
}
+ msg_arg = tee_shm_get_va(shm_arg, 0);
+ if (IS_ERR(msg_arg)) {
+ rc = PTR_ERR(msg_arg);
+ goto out;
+ }
optee_fill_pages_list(pages_list, pages, num_pages,
tee_shm_get_page_offset(shm));
+ memset(msg_arg, 0, OPTEE_MSG_GET_ARG_SIZE(1));
+ msg_arg->num_params = 1;
msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
OPTEE_MSG_ATTR_NONCONTIG;
@@ -471,7 +487,7 @@ static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
(tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
- if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
+ if (optee->ops->do_call_with_arg(ctx, shm_arg, 0) ||
msg_arg->ret != TEEC_SUCCESS)
rc = -EINVAL;
@@ -487,19 +503,37 @@ static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
struct optee_msg_arg *msg_arg;
struct tee_shm *shm_arg;
int rc = 0;
+ size_t sz;
- shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
+ /*
+	 * We're about to unregister shared memory and we may not be able
+	 * to register shared memory for this request in case we're called
+	 * from optee_shm_arg_cache_uninit().
+	 *
+	 * So in order to keep things simple, in this function just as in
+	 * optee_shm_register() we'll use a temporary private allocation
+ * instead of using optee_get_msg_arg().
+ */
+ sz = optee_msg_arg_size(optee->rpc_param_count);
+ shm_arg = tee_shm_alloc_priv_buf(ctx, sz);
if (IS_ERR(shm_arg))
return PTR_ERR(shm_arg);
+ msg_arg = tee_shm_get_va(shm_arg, 0);
+ if (IS_ERR(msg_arg)) {
+ rc = PTR_ERR(msg_arg);
+ goto out;
+ }
+ memset(msg_arg, 0, sz);
+ msg_arg->num_params = 1;
msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
-
msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
- if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
+ if (optee->ops->do_call_with_arg(ctx, shm_arg, 0) ||
msg_arg->ret != TEEC_SUCCESS)
rc = -EINVAL;
+out:
tee_shm_free(shm_arg);
return rc;
}
@@ -732,16 +766,9 @@ static void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
}
static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
- struct tee_shm *shm,
+ struct optee_msg_arg *arg,
struct optee_call_ctx *call_ctx)
{
- struct optee_msg_arg *arg;
-
- arg = tee_shm_get_va(shm, 0);
- if (IS_ERR(arg)) {
- pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
- return;
- }
switch (arg->cmd) {
case OPTEE_RPC_CMD_SHM_ALLOC:
@@ -765,11 +792,13 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
* Result of RPC is written back into @param.
*/
static void optee_handle_rpc(struct tee_context *ctx,
+ struct optee_msg_arg *rpc_arg,
struct optee_rpc_param *param,
struct optee_call_ctx *call_ctx)
{
struct tee_device *teedev = ctx->teedev;
struct optee *optee = tee_get_drvdata(teedev);
+ struct optee_msg_arg *arg;
struct tee_shm *shm;
phys_addr_t pa;
@@ -801,8 +830,19 @@ static void optee_handle_rpc(struct tee_context *ctx,
*/
break;
case OPTEE_SMC_RPC_FUNC_CMD:
- shm = reg_pair_to_ptr(param->a1, param->a2);
- handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
+ if (rpc_arg) {
+ arg = rpc_arg;
+ } else {
+ shm = reg_pair_to_ptr(param->a1, param->a2);
+ arg = tee_shm_get_va(shm, 0);
+ if (IS_ERR(arg)) {
+ pr_err("%s: tee_shm_get_va %p failed\n",
+ __func__, shm);
+ break;
+ }
+ }
+
+ handle_rpc_func_cmd(ctx, optee, arg, call_ctx);
break;
default:
pr_warn("Unknown RPC func 0x%x\n",
@@ -816,7 +856,8 @@ static void optee_handle_rpc(struct tee_context *ctx,
/**
* optee_smc_do_call_with_arg() - Do an SMC to OP-TEE in secure world
* @ctx: calling context
- * @arg: shared memory holding the message to pass to secure world
+ * @shm: shared memory holding the message to pass to secure world
+ * @offs: offset of the message in @shm
*
 * Does an SMC to OP-TEE in secure world and handles eventual resulting
* Remote Procedure Calls (RPC) from OP-TEE.
@@ -824,21 +865,46 @@ static void optee_handle_rpc(struct tee_context *ctx,
* Returns return code from secure world, 0 is OK
*/
static int optee_smc_do_call_with_arg(struct tee_context *ctx,
- struct tee_shm *arg)
+ struct tee_shm *shm, u_int offs)
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_call_waiter w;
struct optee_rpc_param param = { };
struct optee_call_ctx call_ctx = { };
- phys_addr_t parg;
+ struct optee_msg_arg *rpc_arg = NULL;
int rc;
- rc = tee_shm_get_pa(arg, 0, &parg);
- if (rc)
- return rc;
+ if (optee->rpc_param_count) {
+ struct optee_msg_arg *arg;
+ unsigned int rpc_arg_offs;
+
+ arg = tee_shm_get_va(shm, offs);
+ if (IS_ERR(arg))
+ return PTR_ERR(arg);
+
+ rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
+ rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs);
+		if (IS_ERR(rpc_arg))
+			return PTR_ERR(rpc_arg);
+ }
+
+ if (rpc_arg && tee_shm_is_dynamic(shm)) {
+ param.a0 = OPTEE_SMC_CALL_WITH_REGD_ARG;
+ reg_pair_from_64(&param.a1, &param.a2, (u_long)shm);
+ param.a3 = offs;
+ } else {
+ phys_addr_t parg;
- param.a0 = OPTEE_SMC_CALL_WITH_ARG;
- reg_pair_from_64(&param.a1, &param.a2, parg);
+ rc = tee_shm_get_pa(shm, offs, &parg);
+ if (rc)
+ return rc;
+
+ if (rpc_arg)
+ param.a0 = OPTEE_SMC_CALL_WITH_RPC_ARG;
+ else
+ param.a0 = OPTEE_SMC_CALL_WITH_ARG;
+ reg_pair_from_64(&param.a1, &param.a2, parg);
+ }
/* Initialize waiter */
optee_cq_wait_init(&optee->call_queue, &w);
while (true) {
@@ -862,7 +928,7 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx,
param.a1 = res.a1;
param.a2 = res.a2;
param.a3 = res.a3;
- optee_handle_rpc(ctx, &param, &call_ctx);
+ optee_handle_rpc(ctx, rpc_arg, &param, &call_ctx);
} else {
rc = res.a0;
break;
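
Summarizing the entry-point selection performed above (derived from the three branches, not an addition to the ABI):

	/*
	 * rpc_arg set, dynamic shm -> OPTEE_SMC_CALL_WITH_REGD_ARG
	 *                             (a1/a2 = shm cookie, a3 = offset)
	 * rpc_arg set, static shm  -> OPTEE_SMC_CALL_WITH_RPC_ARG
	 *                             (a1/a2 = physical address of the args)
	 * no rpc_arg               -> OPTEE_SMC_CALL_WITH_ARG
	 *                             (a1/a2 = physical address, legacy path)
	 */
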
@@ -881,17 +947,19 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx,
static int simple_call_with_arg(struct tee_context *ctx, u32 cmd)
{
+ struct optee_shm_arg_entry *entry;
struct optee_msg_arg *msg_arg;
struct tee_shm *shm;
+ u_int offs;
- shm = optee_get_msg_arg(ctx, 0, &msg_arg);
- if (IS_ERR(shm))
- return PTR_ERR(shm);
+ msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
msg_arg->cmd = cmd;
- optee_smc_do_call_with_arg(ctx, shm);
+ optee_smc_do_call_with_arg(ctx, shm, offs);
- tee_shm_free(shm);
+ optee_free_msg_arg(ctx, entry, offs);
return 0;
}
@@ -1118,7 +1186,8 @@ static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
}
static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
- u32 *sec_caps, u32 *max_notif_value)
+ u32 *sec_caps, u32 *max_notif_value,
+ unsigned int *rpc_param_count)
{
union {
struct arm_smccc_res smccc;
@@ -1145,6 +1214,10 @@ static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
*max_notif_value = res.result.max_notif_value;
else
*max_notif_value = OPTEE_DEFAULT_MAX_NOTIF_VALUE;
+ if (*sec_caps & OPTEE_SMC_SEC_CAP_RPC_ARG)
+ *rpc_param_count = (u8)res.result.data;
+ else
+ *rpc_param_count = 0;
return true;
}
@@ -1251,7 +1324,8 @@ static int optee_smc_remove(struct platform_device *pdev)
* reference counters and also avoid wild pointers in secure world
* into the old shared memory range.
*/
- optee_disable_shm_cache(optee);
+ if (!optee->rpc_param_count)
+ optee_disable_shm_cache(optee);
optee_smc_notif_uninit_irq(optee);
@@ -1274,7 +1348,10 @@ static int optee_smc_remove(struct platform_device *pdev)
*/
static void optee_shutdown(struct platform_device *pdev)
{
- optee_disable_shm_cache(platform_get_drvdata(pdev));
+ struct optee *optee = platform_get_drvdata(pdev);
+
+ if (!optee->rpc_param_count)
+ optee_disable_shm_cache(optee);
}
static int optee_probe(struct platform_device *pdev)
@@ -1283,9 +1360,11 @@ static int optee_probe(struct platform_device *pdev)
struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
struct optee *optee = NULL;
void *memremaped_shm = NULL;
+ unsigned int rpc_param_count;
struct tee_device *teedev;
struct tee_context *ctx;
u32 max_notif_value;
+ u32 arg_cache_flags;
u32 sec_caps;
int rc;
@@ -1306,7 +1385,8 @@ static int optee_probe(struct platform_device *pdev)
}
if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps,
- &max_notif_value)) {
+ &max_notif_value,
+ &rpc_param_count)) {
pr_warn("capabilities mismatch\n");
return -EINVAL;
}
@@ -1314,14 +1394,48 @@ static int optee_probe(struct platform_device *pdev)
/*
* Try to use dynamic shared memory if possible
*/
- if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+ if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) {
+ /*
+ * If we have OPTEE_SMC_SEC_CAP_RPC_ARG we can ask
+ * optee_get_msg_arg() to pre-register (by having
+ * OPTEE_SHM_ARG_ALLOC_PRIV cleared) the page used to pass
+ * an argument struct.
+ *
+		 * With the page pre-registered we can use a non-zero
+		 * offset for the argument struct; this is indicated with
+ * OPTEE_SHM_ARG_SHARED.
+ *
+ * This means that optee_smc_do_call_with_arg() will use
+ * OPTEE_SMC_CALL_WITH_REGD_ARG for pre-registered pages.
+ */
+ if (sec_caps & OPTEE_SMC_SEC_CAP_RPC_ARG)
+ arg_cache_flags = OPTEE_SHM_ARG_SHARED;
+ else
+ arg_cache_flags = OPTEE_SHM_ARG_ALLOC_PRIV;
+
pool = optee_shm_pool_alloc_pages();
+ }
/*
* If dynamic shared memory is not available or failed - try static one
*/
- if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
+ if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM)) {
+ /*
+ * The static memory pool can use non-zero page offsets so
+ * let optee_get_msg_arg() know that with OPTEE_SHM_ARG_SHARED.
+ *
+ * optee_get_msg_arg() should not pre-register the
+ * allocated page used to pass an argument struct, this is
+ * indicated with OPTEE_SHM_ARG_ALLOC_PRIV.
+ *
+ * This means that optee_smc_do_call_with_arg() will use
+ * OPTEE_SMC_CALL_WITH_ARG if rpc_param_count is 0, else
+ * OPTEE_SMC_CALL_WITH_RPC_ARG.
+ */
+ arg_cache_flags = OPTEE_SHM_ARG_SHARED |
+ OPTEE_SHM_ARG_ALLOC_PRIV;
pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
+ }
if (IS_ERR(pool))
return PTR_ERR(pool);
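
The cache-flag selection pairs with that entry-point logic; in summary (again derived from the branches above):

	/*
	 * dynamic shm, RPC_ARG cap    -> OPTEE_SHM_ARG_SHARED
	 * dynamic shm, no RPC_ARG cap -> OPTEE_SHM_ARG_ALLOC_PRIV
	 * reserved (static) shm       -> OPTEE_SHM_ARG_SHARED |
	 *                                OPTEE_SHM_ARG_ALLOC_PRIV
	 */
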
@@ -1335,6 +1449,7 @@ static int optee_probe(struct platform_device *pdev)
optee->ops = &optee_ops;
optee->smc.invoke_fn = invoke_fn;
optee->smc.sec_caps = sec_caps;
+ optee->rpc_param_count = rpc_param_count;
teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
if (IS_ERR(teedev)) {
@@ -1363,6 +1478,7 @@ static int optee_probe(struct platform_device *pdev)
optee_supp_init(&optee->supp);
optee->smc.memremaped_shm = memremaped_shm;
optee->pool = pool;
+ optee_shm_arg_cache_init(optee, arg_cache_flags);
platform_set_drvdata(pdev, optee);
ctx = teedev_open(optee->teedev);
@@ -1403,7 +1519,12 @@ static int optee_probe(struct platform_device *pdev)
*/
optee_disable_unmapped_shm_cache(optee);
- optee_enable_shm_cache(optee);
+ /*
+ * Only enable the shm cache in case we're not able to pass the RPC
+ * arg struct right after the normal arg struct.
+ */
+ if (!optee->rpc_param_count)
+ optee_enable_shm_cache(optee);
if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
pr_info("dynamic shared memory is enabled\n");
@@ -1416,7 +1537,8 @@ static int optee_probe(struct platform_device *pdev)
return 0;
err_disable_shm_cache:
- optee_disable_shm_cache(optee);
+ if (!optee->rpc_param_count)
+ optee_disable_shm_cache(optee);
optee_smc_notif_uninit_irq(optee);
optee_unregister_devices();
err_notif_uninit:
@@ -1424,6 +1546,7 @@ err_notif_uninit:
err_close_ctx:
teedev_close_context(ctx);
err_supp_uninit:
+ optee_shm_arg_cache_uninit(optee);
optee_supp_uninit(&optee->supp);
mutex_destroy(&optee->call_queue.mutex);
err_unreg_supp_teedev:
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 8aa1a4836b92..af0f7c603fa4 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -302,7 +302,6 @@ static int tee_ioctl_shm_alloc(struct tee_context *ctx,
return PTR_ERR(shm);
data.id = shm->id;
- data.flags = shm->flags;
data.size = shm->size;
if (copy_to_user(udata, &data, sizeof(data)))
@@ -339,7 +338,6 @@ tee_ioctl_shm_register(struct tee_context *ctx,
return PTR_ERR(shm);
data.id = shm->id;
- data.flags = shm->flags;
data.length = shm->size;
if (copy_to_user(udata, &data, sizeof(data)))
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index f31e29e8f1ca..f2b1bcefcadd 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -23,21 +23,36 @@ static void shm_put_kernel_pages(struct page **pages, size_t page_count)
static int shm_get_kernel_pages(unsigned long start, size_t page_count,
struct page **pages)
{
- struct kvec *kiov;
size_t n;
int rc;
- kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL);
- if (!kiov)
- return -ENOMEM;
+ if (is_vmalloc_addr((void *)start)) {
+ struct page *page;
- for (n = 0; n < page_count; n++) {
- kiov[n].iov_base = (void *)(start + n * PAGE_SIZE);
- kiov[n].iov_len = PAGE_SIZE;
- }
+ for (n = 0; n < page_count; n++) {
+ page = vmalloc_to_page((void *)(start + PAGE_SIZE * n));
+ if (!page)
+ return -ENOMEM;
+
+ get_page(page);
+ pages[n] = page;
+ }
+ rc = page_count;
+ } else {
+ struct kvec *kiov;
+
+ kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL);
+ if (!kiov)
+ return -ENOMEM;
+
+ for (n = 0; n < page_count; n++) {
+ kiov[n].iov_base = (void *)(start + n * PAGE_SIZE);
+ kiov[n].iov_len = PAGE_SIZE;
+ }
- rc = get_kernel_pages(kiov, page_count, 0, pages);
- kfree(kiov);
+ rc = get_kernel_pages(kiov, page_count, 0, pages);
+ kfree(kiov);
+ }
return rc;
}
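
The vmalloc branch is needed because linear-map address arithmetic does not apply to vmalloc mappings; each page has to be looked up through the page tables. A minimal sketch of the distinction for a single page-aligned buffer (buf is a placeholder):

	struct page *page;

	if (is_vmalloc_addr(buf))
		page = vmalloc_to_page(buf);	/* walk the page tables */
	else
		page = virt_to_page(buf);	/* linear-map arithmetic */

	get_page(page);		/* pin while secure world may access it */
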
@@ -415,56 +430,6 @@ void tee_shm_free(struct tee_shm *shm)
EXPORT_SYMBOL_GPL(tee_shm_free);
/**
- * tee_shm_va2pa() - Get physical address of a virtual address
- * @shm: Shared memory handle
- * @va: Virtual address to tranlsate
- * @pa: Returned physical address
- * @returns 0 on success and < 0 on failure
- */
-int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
-{
- if (!shm->kaddr)
- return -EINVAL;
- /* Check that we're in the range of the shm */
- if ((char *)va < (char *)shm->kaddr)
- return -EINVAL;
- if ((char *)va >= ((char *)shm->kaddr + shm->size))
- return -EINVAL;
-
- return tee_shm_get_pa(
- shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
-}
-EXPORT_SYMBOL_GPL(tee_shm_va2pa);
-
-/**
- * tee_shm_pa2va() - Get virtual address of a physical address
- * @shm: Shared memory handle
- * @pa: Physical address to tranlsate
- * @va: Returned virtual address
- * @returns 0 on success and < 0 on failure
- */
-int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
-{
- if (!shm->kaddr)
- return -EINVAL;
- /* Check that we're in the range of the shm */
- if (pa < shm->paddr)
- return -EINVAL;
- if (pa >= (shm->paddr + shm->size))
- return -EINVAL;
-
- if (va) {
- void *v = tee_shm_get_va(shm, pa - shm->paddr);
-
- if (IS_ERR(v))
- return PTR_ERR(v);
- *va = v;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(tee_shm_pa2va);
-
-/**
* tee_shm_get_va() - Get virtual address of a shared memory plus an offset
* @shm: Shared memory handle
* @offs: Offset from start of this shared memory