Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpi_configfs.c | 25
-rw-r--r--  drivers/acpi/acpi_lpss.c | 2
-rw-r--r--  drivers/acpi/device_sysfs.c | 6
-rw-r--r--  drivers/acpi/nfit/core.c | 185
-rw-r--r--  drivers/acpi/nfit/nfit.h | 17
-rw-r--r--  drivers/acpi/pptt.c | 3
-rw-r--r--  drivers/acpi/sysfs.c | 21
-rw-r--r--  drivers/base/power/domain.c | 20
-rw-r--r--  drivers/base/power/domain_governor.c | 1
-rw-r--r--  drivers/base/power/main.c | 21
-rw-r--r--  drivers/base/power/power.h | 1
-rw-r--r--  drivers/base/power/qos.c | 2
-rw-r--r--  drivers/base/power/runtime.c | 16
-rw-r--r--  drivers/base/power/sysfs.c | 12
-rw-r--r--  drivers/base/power/trace.c | 2
-rw-r--r--  drivers/base/power/wakeup.c | 30
-rw-r--r--  drivers/clk/Kconfig | 6
-rw-r--r--  drivers/clk/Makefile | 3
-rw-r--r--  drivers/clk/actions/Kconfig | 5
-rw-r--r--  drivers/clk/actions/Makefile | 1
-rw-r--r--  drivers/clk/actions/owl-pll.c | 2
-rw-r--r--  drivers/clk/actions/owl-pll.h | 30
-rw-r--r--  drivers/clk/actions/owl-s500.c | 525
-rw-r--r--  drivers/clk/at91/clk-audio-pll.c | 9
-rw-r--r--  drivers/clk/at91/clk-programmable.c | 3
-rw-r--r--  drivers/clk/at91/sama5d2.c | 3
-rw-r--r--  drivers/clk/clk-clps711x.c | 61
-rw-r--r--  drivers/clk/clk-devres.c | 11
-rw-r--r--  drivers/clk/clk-fixed-mmio.c | 101
-rw-r--r--  drivers/clk/clk-fractional-divider.c | 2
-rw-r--r--  drivers/clk/clk-gpio.c | 39
-rw-r--r--  drivers/clk/clk-highbank.c | 1
-rw-r--r--  drivers/clk/clk-max77686.c | 28
-rw-r--r--  drivers/clk/clk-qoriq.c | 5
-rw-r--r--  drivers/clk/clk-stm32mp1.c | 37
-rw-r--r--  drivers/clk/clk-twl6040.c | 53
-rw-r--r--  drivers/clk/clk.c | 262
-rw-r--r--  drivers/clk/clk.h | 23
-rw-r--r--  drivers/clk/clkdev.c | 231
-rw-r--r--  drivers/clk/imx/Kconfig | 6
-rw-r--r--  drivers/clk/imx/Makefile | 4
-rw-r--r--  drivers/clk/imx/clk-composite-8m.c | 2
-rw-r--r--  drivers/clk/imx/clk-imx51-imx53.c | 1
-rw-r--r--  drivers/clk/imx/clk-imx6q.c | 1
-rw-r--r--  drivers/clk/imx/clk-imx6sx.c | 1
-rw-r--r--  drivers/clk/imx/clk-imx7d.c | 1
-rw-r--r--  drivers/clk/imx/clk-imx7ulp.c | 16
-rw-r--r--  drivers/clk/imx/clk-imx8mm.c | 675
-rw-r--r--  drivers/clk/imx/clk-imx8mq.c | 254
-rw-r--r--  drivers/clk/imx/clk-imx8qxp.c | 1
-rw-r--r--  drivers/clk/imx/clk-pll14xx.c | 392
-rw-r--r--  drivers/clk/imx/clk-sccg-pll.c | 514
-rw-r--r--  drivers/clk/imx/clk-scu.c | 123
-rw-r--r--  drivers/clk/imx/clk-scu.h | 16
-rw-r--r--  drivers/clk/imx/clk-vf610.c | 1
-rw-r--r--  drivers/clk/imx/clk.h | 38
-rw-r--r--  drivers/clk/ingenic/cgu.c | 13
-rw-r--r--  drivers/clk/ingenic/cgu.h | 2
-rw-r--r--  drivers/clk/ingenic/jz4740-cgu.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-gate.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-gate.h | 3
-rw-r--r--  drivers/clk/mediatek/clk-mt2701.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt2712.c | 9
-rw-r--r--  drivers/clk/mediatek/clk-mt6797.c | 68
-rw-r--r--  drivers/clk/mediatek/clk-mt8173.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mtk.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mtk.h | 29
-rw-r--r--  drivers/clk/meson/Kconfig | 101
-rw-r--r--  drivers/clk/meson/Makefile | 29
-rw-r--r--  drivers/clk/meson/axg-aoclk.c | 193
-rw-r--r--  drivers/clk/meson/axg-aoclk.h | 13
-rw-r--r--  drivers/clk/meson/axg-audio.c | 5
-rw-r--r--  drivers/clk/meson/axg.c | 69
-rw-r--r--  drivers/clk/meson/clk-dualdiv.c | 138
-rw-r--r--  drivers/clk/meson/clk-dualdiv.h | 33
-rw-r--r--  drivers/clk/meson/clk-input.c | 7
-rw-r--r--  drivers/clk/meson/clk-input.h | 19
-rw-r--r--  drivers/clk/meson/clk-mpll.c | 12
-rw-r--r--  drivers/clk/meson/clk-mpll.h | 30
-rw-r--r--  drivers/clk/meson/clk-phase.c | 75
-rw-r--r--  drivers/clk/meson/clk-phase.h | 26
-rw-r--r--  drivers/clk/meson/clk-pll.c | 216
-rw-r--r--  drivers/clk/meson/clk-pll.h | 49
-rw-r--r--  drivers/clk/meson/clk-regmap.c | 5
-rw-r--r--  drivers/clk/meson/clk-regmap.h | 20
-rw-r--r--  drivers/clk/meson/clk-triphase.c | 68
-rw-r--r--  drivers/clk/meson/clkc.h | 127
-rw-r--r--  drivers/clk/meson/g12a-aoclk.c | 454
-rw-r--r--  drivers/clk/meson/g12a-aoclk.h | 34
-rw-r--r--  drivers/clk/meson/g12a.c | 2359
-rw-r--r--  drivers/clk/meson/g12a.h | 175
-rw-r--r--  drivers/clk/meson/gxbb-aoclk-32k.c | 193
-rw-r--r--  drivers/clk/meson/gxbb-aoclk.c | 268
-rw-r--r--  drivers/clk/meson/gxbb-aoclk.h | 20
-rw-r--r--  drivers/clk/meson/gxbb.c | 296
-rw-r--r--  drivers/clk/meson/meson-aoclk.c | 54
-rw-r--r--  drivers/clk/meson/meson-aoclk.h | 13
-rw-r--r--  drivers/clk/meson/meson-eeclk.c | 63
-rw-r--r--  drivers/clk/meson/meson-eeclk.h | 25
-rw-r--r--  drivers/clk/meson/meson8b.c | 374
-rw-r--r--  drivers/clk/meson/meson8b.h | 11
-rw-r--r--  drivers/clk/meson/parm.h | 46
-rw-r--r--  drivers/clk/meson/sclk-div.c | 10
-rw-r--r--  drivers/clk/meson/sclk-div.h (renamed from drivers/clk/meson/clkc-audio.h) | 16
-rw-r--r--  drivers/clk/meson/vid-pll-div.c | 10
-rw-r--r--  drivers/clk/meson/vid-pll-div.h | 20
-rw-r--r--  drivers/clk/mmp/clk-of-mmp2.c | 5
-rw-r--r--  drivers/clk/mvebu/armada-370.c | 4
-rw-r--r--  drivers/clk/mvebu/armada-xp.c | 4
-rw-r--r--  drivers/clk/mvebu/dove.c | 8
-rw-r--r--  drivers/clk/mvebu/kirkwood.c | 2
-rw-r--r--  drivers/clk/mvebu/mv98dx3236.c | 4
-rw-r--r--  drivers/clk/qcom/clk-rcg.h | 5
-rw-r--r--  drivers/clk/qcom/clk-rcg2.c | 24
-rw-r--r--  drivers/clk/qcom/clk-rpmh.c | 146
-rw-r--r--  drivers/clk/qcom/clk-smd-rpm.c | 63
-rw-r--r--  drivers/clk/qcom/common.c | 8
-rw-r--r--  drivers/clk/qcom/common.h | 2
-rw-r--r--  drivers/clk/qcom/gcc-ipq8074.c | 10
-rw-r--r--  drivers/clk/qcom/gcc-mdm9615.c | 11
-rw-r--r--  drivers/clk/qcom/gcc-msm8996.c | 10
-rw-r--r--  drivers/clk/qcom/gcc-msm8998.c | 61
-rw-r--r--  drivers/clk/qcom/gcc-qcs404.c | 10
-rw-r--r--  drivers/clk/qcom/gcc-sdm660.c | 11
-rw-r--r--  drivers/clk/qcom/gcc-sdm845.c | 5
-rw-r--r--  drivers/clk/qcom/mmcc-msm8996.c | 10
-rw-r--r--  drivers/clk/renesas/r8a774a1-cpg-mssr.c | 4
-rw-r--r--  drivers/clk/renesas/r8a774c0-cpg-mssr.c | 15
-rw-r--r--  drivers/clk/renesas/r8a77980-cpg-mssr.c | 8
-rw-r--r--  drivers/clk/renesas/rcar-gen3-cpg.c | 147
-rw-r--r--  drivers/clk/renesas/rcar-gen3-cpg.h | 4
-rw-r--r--  drivers/clk/rockchip/clk-rk3188.c | 4
-rw-r--r--  drivers/clk/rockchip/clk-rk3328.c | 12
-rw-r--r--  drivers/clk/samsung/clk-exynos4.c | 1
-rw-r--r--  drivers/clk/samsung/clk-exynos5-subcmu.c | 13
-rw-r--r--  drivers/clk/samsung/clk-exynos5433.c | 38
-rw-r--r--  drivers/clk/samsung/clk-s3c2443.c | 2
-rw-r--r--  drivers/clk/samsung/clk.h | 2
-rw-r--r--  drivers/clk/socfpga/clk-gate.c | 22
-rw-r--r--  drivers/clk/socfpga/clk-pll-a10.c | 1
-rw-r--r--  drivers/clk/socfpga/clk-pll.c | 1
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-a23.c | 2
-rw-r--r--  drivers/clk/tegra/clk-dfll.c | 18
-rw-r--r--  drivers/clk/ti/adpll.c | 2
-rw-r--r--  drivers/clk/ti/apll.c | 4
-rw-r--r--  drivers/clk/ti/autoidle.c | 101
-rw-r--r--  drivers/clk/ti/clk.c | 80
-rw-r--r--  drivers/clk/ti/clkctrl.c | 4
-rw-r--r--  drivers/clk/ti/clock.h | 5
-rw-r--r--  drivers/clk/ti/clockdomain.c | 2
-rw-r--r--  drivers/clk/ti/divider.c | 2
-rw-r--r--  drivers/clk/ti/dpll.c | 11
-rw-r--r--  drivers/clk/ti/dpll3xxx.c | 2
-rw-r--r--  drivers/clk/ti/gate.c | 2
-rw-r--r--  drivers/clk/ti/interface.c | 4
-rw-r--r--  drivers/clk/ti/mux.c | 2
-rw-r--r--  drivers/clk/uniphier/clk-uniphier-cpugear.c | 2
-rw-r--r--  drivers/clk/x86/clk-lpt.c | 2
-rw-r--r--  drivers/clk/x86/clk-st.c | 3
-rw-r--r--  drivers/cpufreq/cpufreq.c | 24
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 2
-rw-r--r--  drivers/cpufreq/pxa2xx-cpufreq.c | 4
-rw-r--r--  drivers/cpuidle/governor.c | 1
-rw-r--r--  drivers/cpuidle/governors/menu.c | 2
-rw-r--r--  drivers/crypto/caam/caamalg.c | 12
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c | 11
-rw-r--r--  drivers/crypto/caam/caamhash.c | 18
-rw-r--r--  drivers/crypto/caam/caampkc.c | 14
-rw-r--r--  drivers/crypto/caam/caamrng.c | 22
-rw-r--r--  drivers/crypto/s5p-sss.c | 2
-rw-r--r--  drivers/dax/super.c | 38
-rw-r--r--  drivers/dma/Kconfig | 14
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/at_hdmac.c | 5
-rw-r--r--  drivers/dma/bcm2835-dma.c | 27
-rw-r--r--  drivers/dma/dma-axi-dmac.c | 3
-rw-r--r--  drivers/dma/dma-jz4780.c | 5
-rw-r--r--  drivers/dma/dmatest.c | 269
-rw-r--r--  drivers/dma/dw-axi-dmac/dw-axi-dmac.h | 2
-rw-r--r--  drivers/dma/dw/Kconfig | 2
-rw-r--r--  drivers/dma/dw/Makefile | 2
-rw-r--r--  drivers/dma/dw/core.c | 245
-rw-r--r--  drivers/dma/dw/dw.c | 138
-rw-r--r--  drivers/dma/dw/idma32.c | 160
-rw-r--r--  drivers/dma/dw/internal.h | 15
-rw-r--r--  drivers/dma/dw/pci.c | 53
-rw-r--r--  drivers/dma/dw/platform.c | 22
-rw-r--r--  drivers/dma/dw/regs.h | 30
-rw-r--r--  drivers/dma/fsl-edma-common.c | 70
-rw-r--r--  drivers/dma/fsl-edma-common.h | 4
-rw-r--r--  drivers/dma/fsl-edma.c | 1
-rw-r--r--  drivers/dma/fsl-qdma.c | 1259
-rw-r--r--  drivers/dma/fsldma.c | 16
-rw-r--r--  drivers/dma/fsldma.h | 68
-rw-r--r--  drivers/dma/imx-dma.c | 8
-rw-r--r--  drivers/dma/imx-sdma.c | 49
-rw-r--r--  drivers/dma/ioat/dma.c | 12
-rw-r--r--  drivers/dma/ioat/dma.h | 2
-rw-r--r--  drivers/dma/ioat/hw.h | 3
-rw-r--r--  drivers/dma/ioat/init.c | 40
-rw-r--r--  drivers/dma/ioat/registers.h | 24
-rw-r--r--  drivers/dma/k3dma.c | 61
-rw-r--r--  drivers/dma/mcf-edma.c | 1
-rw-r--r--  drivers/dma/mv_xor.c | 7
-rw-r--r--  drivers/dma/pl330.c | 1
-rw-r--r--  drivers/dma/qcom/bam_dma.c | 4
-rw-r--r--  drivers/dma/qcom/hidma.c | 19
-rw-r--r--  drivers/dma/qcom/hidma_mgmt.c | 3
-rw-r--r--  drivers/dma/sa11x0-dma.c | 2
-rw-r--r--  drivers/dma/sh/usb-dmac.c | 2
-rw-r--r--  drivers/dma/sprd-dma.c | 19
-rw-r--r--  drivers/dma/st_fdma.c | 6
-rw-r--r--  drivers/dma/stm32-dma.c | 71
-rw-r--r--  drivers/dma/stm32-dmamux.c | 58
-rw-r--r--  drivers/dma/stm32-mdma.c | 56
-rw-r--r--  drivers/dma/tegra20-apb-dma.c | 45
-rw-r--r--  drivers/dma/tegra210-adma.c | 5
-rw-r--r--  drivers/dma/timb_dma.c | 4
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c | 170
-rw-r--r--  drivers/isdn/hardware/mISDN/hfcpci.c | 9
-rw-r--r--  drivers/mailbox/Kconfig | 11
-rw-r--r--  drivers/mailbox/Makefile | 2
-rw-r--r--  drivers/mailbox/imx-mailbox.c | 4
-rw-r--r--  drivers/mailbox/mailbox-test.c | 26
-rw-r--r--  drivers/mailbox/stm32-ipcc.c | 4
-rw-r--r--  drivers/mailbox/tegra-hsp.c | 2
-rw-r--r--  drivers/mailbox/zynqmp-ipi-mailbox.c | 725
-rw-r--r--  drivers/mtd/ubi/cdev.c | 30
-rw-r--r--  drivers/mtd/ubi/ubi.h | 1
-rw-r--r--  drivers/mtd/ubi/wl.c | 174
-rw-r--r--  drivers/net/ethernet/8390/pcnet_cs.c | 10
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_core.c | 10
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 5
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 6
-rw-r--r--  drivers/net/ethernet/fujitsu/fmvj18x_cs.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 161
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/minimal.c | 18
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.c | 55
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 5
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 6
-rw-r--r--  drivers/net/ethernet/rocker/rocker_main.c | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 5
-rw-r--r--  drivers/net/ppp/pptp.c | 1
-rw-r--r--  drivers/nvdimm/btt.c | 33
-rw-r--r--  drivers/nvdimm/btt.h | 2
-rw-r--r--  drivers/nvdimm/btt_devs.c | 8
-rw-r--r--  drivers/nvdimm/dimm_devs.c | 7
-rw-r--r--  drivers/nvdimm/label.c | 26
-rw-r--r--  drivers/nvdimm/namespace_devs.c | 18
-rw-r--r--  drivers/nvdimm/of_pmem.c | 1
-rw-r--r--  drivers/nvdimm/pfn_devs.c | 24
-rw-r--r--  drivers/nvdimm/region_devs.c | 7
-rw-r--r--  drivers/opp/core.c | 2
-rw-r--r--  drivers/opp/of.c | 16
-rw-r--r--  drivers/pwm/Kconfig | 17
-rw-r--r--  drivers/pwm/Makefile | 3
-rw-r--r--  drivers/pwm/core.c | 10
-rw-r--r--  drivers/pwm/pwm-atmel.c | 111
-rw-r--r--  drivers/pwm/pwm-bcm-kona.c | 16
-rw-r--r--  drivers/pwm/pwm-hibvt.c | 44
-rw-r--r--  drivers/pwm/pwm-imx1.c | 199
-rw-r--r--  drivers/pwm/pwm-imx27.c (renamed from drivers/pwm/pwm-imx.c) | 224
-rw-r--r--  drivers/pwm/pwm-mtk-disp.c | 11
-rw-r--r--  drivers/pwm/pwm-rcar.c | 99
-rw-r--r--  drivers/remoteproc/qcom_q6v5_adsp.c | 6
-rw-r--r--  drivers/remoteproc/qcom_q6v5_mss.c | 209
-rw-r--r--  drivers/remoteproc/qcom_q6v5_pas.c | 13
-rw-r--r--  drivers/remoteproc/qcom_sysmon.c | 82
-rw-r--r--  drivers/remoteproc/qcom_wcnss.c | 6
-rw-r--r--  drivers/remoteproc/remoteproc_core.c | 160
-rw-r--r--  drivers/remoteproc/remoteproc_debugfs.c | 47
-rw-r--r--  drivers/remoteproc/remoteproc_internal.h | 12
-rw-r--r--  drivers/remoteproc/remoteproc_virtio.c | 61
-rw-r--r--  drivers/remoteproc/st_remoteproc.c | 91
-rw-r--r--  drivers/rpmsg/virtio_rpmsg_bus.c | 24
-rw-r--r--  drivers/tty/serial/8250/8250_lpss.c | 1
287 files changed, 13826 insertions(+), 2981 deletions(-)
diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
index b58850389094..81bfc6197293 100644
--- a/drivers/acpi/acpi_configfs.c
+++ b/drivers/acpi/acpi_configfs.c
@@ -97,12 +97,12 @@ static ssize_t acpi_table_aml_read(struct config_item *cfg,
CONFIGFS_BIN_ATTR(acpi_table_, aml, NULL, MAX_ACPI_TABLE_SIZE);
-struct configfs_bin_attribute *acpi_table_bin_attrs[] = {
+static struct configfs_bin_attribute *acpi_table_bin_attrs[] = {
&acpi_table_attr_aml,
NULL,
};
-ssize_t acpi_table_signature_show(struct config_item *cfg, char *str)
+static ssize_t acpi_table_signature_show(struct config_item *cfg, char *str)
{
struct acpi_table_header *h = get_header(cfg);
@@ -112,7 +112,7 @@ ssize_t acpi_table_signature_show(struct config_item *cfg, char *str)
return sprintf(str, "%.*s\n", ACPI_NAME_SIZE, h->signature);
}
-ssize_t acpi_table_length_show(struct config_item *cfg, char *str)
+static ssize_t acpi_table_length_show(struct config_item *cfg, char *str)
{
struct acpi_table_header *h = get_header(cfg);
@@ -122,7 +122,7 @@ ssize_t acpi_table_length_show(struct config_item *cfg, char *str)
return sprintf(str, "%d\n", h->length);
}
-ssize_t acpi_table_revision_show(struct config_item *cfg, char *str)
+static ssize_t acpi_table_revision_show(struct config_item *cfg, char *str)
{
struct acpi_table_header *h = get_header(cfg);
@@ -132,7 +132,7 @@ ssize_t acpi_table_revision_show(struct config_item *cfg, char *str)
return sprintf(str, "%d\n", h->revision);
}
-ssize_t acpi_table_oem_id_show(struct config_item *cfg, char *str)
+static ssize_t acpi_table_oem_id_show(struct config_item *cfg, char *str)
{
struct acpi_table_header *h = get_header(cfg);
@@ -142,7 +142,7 @@ ssize_t acpi_table_oem_id_show(struct config_item *cfg, char *str)
return sprintf(str, "%.*s\n", ACPI_OEM_ID_SIZE, h->oem_id);
}
-ssize_t acpi_table_oem_table_id_show(struct config_item *cfg, char *str)
+static ssize_t acpi_table_oem_table_id_show(struct config_item *cfg, char *str)
{
struct acpi_table_header *h = get_header(cfg);
@@ -152,7 +152,7 @@ ssize_t acpi_table_oem_table_id_show(struct config_item *cfg, char *str)
return sprintf(str, "%.*s\n", ACPI_OEM_TABLE_ID_SIZE, h->oem_table_id);
}
-ssize_t acpi_table_oem_revision_show(struct config_item *cfg, char *str)
+static ssize_t acpi_table_oem_revision_show(struct config_item *cfg, char *str)
{
struct acpi_table_header *h = get_header(cfg);
@@ -162,7 +162,8 @@ ssize_t acpi_table_oem_revision_show(struct config_item *cfg, char *str)
return sprintf(str, "%d\n", h->oem_revision);
}
-ssize_t acpi_table_asl_compiler_id_show(struct config_item *cfg, char *str)
+static ssize_t acpi_table_asl_compiler_id_show(struct config_item *cfg,
+ char *str)
{
struct acpi_table_header *h = get_header(cfg);
@@ -172,8 +173,8 @@ ssize_t acpi_table_asl_compiler_id_show(struct config_item *cfg, char *str)
return sprintf(str, "%.*s\n", ACPI_NAME_SIZE, h->asl_compiler_id);
}
-ssize_t acpi_table_asl_compiler_revision_show(struct config_item *cfg,
- char *str)
+static ssize_t acpi_table_asl_compiler_revision_show(struct config_item *cfg,
+ char *str)
{
struct acpi_table_header *h = get_header(cfg);
@@ -192,7 +193,7 @@ CONFIGFS_ATTR_RO(acpi_table_, oem_revision);
CONFIGFS_ATTR_RO(acpi_table_, asl_compiler_id);
CONFIGFS_ATTR_RO(acpi_table_, asl_compiler_revision);
-struct configfs_attribute *acpi_table_attrs[] = {
+static struct configfs_attribute *acpi_table_attrs[] = {
&acpi_table_attr_signature,
&acpi_table_attr_length,
&acpi_table_attr_revision,
@@ -232,7 +233,7 @@ static void acpi_table_drop_item(struct config_group *group,
acpi_tb_unload_table(table->index);
}
-struct configfs_group_operations acpi_table_group_ops = {
+static struct configfs_group_operations acpi_table_group_ops = {
.make_item = acpi_table_make_item,
.drop_item = acpi_table_drop_item,
};
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 5f94c35d165f..1e2a10a06b9d 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -18,7 +18,7 @@
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
-#include <linux/platform_data/clk-lpss.h>
+#include <linux/platform_data/x86/clk-lpss.h>
#include <linux/platform_data/x86/pmc_atom.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 545e91420cde..8940054d6250 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -202,11 +202,15 @@ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
{
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
const union acpi_object *of_compatible, *obj;
+ acpi_status status;
int len, count;
int i, nval;
char *c;
- acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
+ status = acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
/* DT strings are all in lower case */
for (c = buf.pointer; *c != '\0'; c++)
*c = tolower(*c);
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index e18ade5d74e9..df8979008dd4 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -55,6 +55,10 @@ static bool no_init_ars;
module_param(no_init_ars, bool, 0644);
MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time");
+static bool force_labels;
+module_param(force_labels, bool, 0444);
+MODULE_PARM_DESC(force_labels, "Opt-in to labels despite missing methods");
+
LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);
@@ -415,7 +419,7 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
if (call_pkg) {
int i;
- if (nfit_mem->family != call_pkg->nd_family)
+ if (nfit_mem && nfit_mem->family != call_pkg->nd_family)
return -ENOTTY;
for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
@@ -424,6 +428,10 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
return call_pkg->nd_command;
}
+ /* In the !call_pkg case, bus commands == bus functions */
+ if (!nfit_mem)
+ return cmd;
+
/* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
return cmd;
@@ -454,17 +462,18 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
if (cmd_rc)
*cmd_rc = -EINVAL;
+ if (cmd == ND_CMD_CALL)
+ call_pkg = buf;
+ func = cmd_to_func(nfit_mem, cmd, call_pkg);
+ if (func < 0)
+ return func;
+
if (nvdimm) {
struct acpi_device *adev = nfit_mem->adev;
if (!adev)
return -ENOTTY;
- if (cmd == ND_CMD_CALL)
- call_pkg = buf;
- func = cmd_to_func(nfit_mem, cmd, call_pkg);
- if (func < 0)
- return func;
dimm_name = nvdimm_name(nvdimm);
cmd_name = nvdimm_cmd_name(cmd);
cmd_mask = nvdimm_cmd_mask(nvdimm);
@@ -475,12 +484,9 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
} else {
struct acpi_device *adev = to_acpi_dev(acpi_desc);
- func = cmd;
cmd_name = nvdimm_bus_cmd_name(cmd);
cmd_mask = nd_desc->cmd_mask;
- dsm_mask = cmd_mask;
- if (cmd == ND_CMD_CALL)
- dsm_mask = nd_desc->bus_dsm_mask;
+ dsm_mask = nd_desc->bus_dsm_mask;
desc = nd_cmd_bus_desc(cmd);
guid = to_nfit_uuid(NFIT_DEV_BUS);
handle = adev->handle;
@@ -554,6 +560,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
return -EINVAL;
}
+ if (out_obj->type != ACPI_TYPE_BUFFER) {
+ dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
+ dimm_name, cmd_name, out_obj->type);
+ rc = -EINVAL;
+ goto out;
+ }
+
if (call_pkg) {
call_pkg->nd_fw_size = out_obj->buffer.length;
memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
@@ -572,13 +585,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
return 0;
}
- if (out_obj->package.type != ACPI_TYPE_BUFFER) {
- dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
- dimm_name, cmd_name, out_obj->type);
- rc = -EINVAL;
- goto out;
- }
-
dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
cmd_name, out_obj->buffer.length);
print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
@@ -1317,19 +1323,30 @@ static ssize_t scrub_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvdimm_bus_descriptor *nd_desc;
+ struct acpi_nfit_desc *acpi_desc;
ssize_t rc = -ENXIO;
+ bool busy;
device_lock(dev);
nd_desc = dev_get_drvdata(dev);
- if (nd_desc) {
- struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+ if (!nd_desc) {
+ device_unlock(dev);
+ return rc;
+ }
+ acpi_desc = to_acpi_desc(nd_desc);
- mutex_lock(&acpi_desc->init_mutex);
- rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
- acpi_desc->scrub_busy
- && !acpi_desc->cancel ? "+\n" : "\n");
- mutex_unlock(&acpi_desc->init_mutex);
+ mutex_lock(&acpi_desc->init_mutex);
+ busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags)
+ && !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
+ rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n");
+ /* Allow an admin to poll the busy state at a higher rate */
+ if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(ARS_POLL,
+ &acpi_desc->scrub_flags)) {
+ acpi_desc->scrub_tmo = 1;
+ mod_delayed_work(nfit_wq, &acpi_desc->dwork, HZ);
}
+
+ mutex_unlock(&acpi_desc->init_mutex);
device_unlock(dev);
return rc;
}
@@ -1759,14 +1776,14 @@ static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
__weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
{
+ struct device *dev = &nfit_mem->adev->dev;
struct nd_intel_smart smart = { 0 };
union acpi_object in_buf = {
- .type = ACPI_TYPE_BUFFER,
- .buffer.pointer = (char *) &smart,
- .buffer.length = sizeof(smart),
+ .buffer.type = ACPI_TYPE_BUFFER,
+ .buffer.length = 0,
};
union acpi_object in_obj = {
- .type = ACPI_TYPE_PACKAGE,
+ .package.type = ACPI_TYPE_PACKAGE,
.package.count = 1,
.package.elements = &in_buf,
};
@@ -1781,8 +1798,15 @@ __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
return;
out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
- if (!out_obj)
+ if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER
+ || out_obj->buffer.length < sizeof(smart)) {
+ dev_dbg(dev->parent, "%s: failed to retrieve initial health\n",
+ dev_name(dev));
+ ACPI_FREE(out_obj);
return;
+ }
+ memcpy(&smart, out_obj->buffer.pointer, sizeof(smart));
+ ACPI_FREE(out_obj);
if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) {
if (smart.shutdown_state)
@@ -1793,7 +1817,6 @@ __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags);
nfit_mem->dirty_shutdown = smart.shutdown_count;
}
- ACPI_FREE(out_obj);
}
static void populate_shutdown_status(struct nfit_mem *nfit_mem)
@@ -1861,9 +1884,17 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
dev_set_drvdata(&adev_dimm->dev, nfit_mem);
/*
- * Until standardization materializes we need to consider 4
- * different command sets. Note, that checking for function0 (bit0)
- * tells us if any commands are reachable through this GUID.
+ * There are 4 "legacy" NVDIMM command sets
+ * (NVDIMM_FAMILY_{INTEL,MSFT,HPE1,HPE2}) that were created before
+ * an EFI working group was established to constrain this
+ * proliferation. The nfit driver probes for the supported command
+ * set by GUID. Note, if you're a platform developer looking to add
+ * a new command set to this probe, consider using an existing set,
+ * or otherwise seek approval to publish the command set at
+ * http://www.uefi.org/RFIC_LIST.
+ *
+ * Note, that checking for function0 (bit0) tells us if any commands
+ * are reachable through this GUID.
*/
for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
@@ -1886,6 +1917,8 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
dsm_mask &= ~(1 << 8);
} else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
dsm_mask = 0xffffffff;
+ } else if (nfit_mem->family == NVDIMM_FAMILY_HYPERV) {
+ dsm_mask = 0x1f;
} else {
dev_dbg(dev, "unknown dimm command family\n");
nfit_mem->family = -1;
@@ -1915,18 +1948,32 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
| 1 << ND_CMD_SET_CONFIG_DATA;
if (family == NVDIMM_FAMILY_INTEL
&& (dsm_mask & label_mask) == label_mask)
- return 0;
+ /* skip _LS{I,R,W} enabling */;
+ else {
+ if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
+ && acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
+ dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
+ set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
+ }
- if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
- && acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
- dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
- set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
- }
+ if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
+ && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
+ dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
+ set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
+ }
- if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
- && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
- dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
- set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
+ /*
+ * Quirk read-only label configurations to preserve
+ * access to label-less namespaces by default.
+ */
+ if (!test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
+ && !force_labels) {
+ dev_dbg(dev, "%s: No _LSW, disable labels\n",
+ dev_name(&adev_dimm->dev));
+ clear_bit(NFIT_MEM_LSR, &nfit_mem->flags);
+ } else
+ dev_dbg(dev, "%s: Force enable labels\n",
+ dev_name(&adev_dimm->dev));
}
populate_shutdown_status(nfit_mem);
@@ -2027,6 +2074,10 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
}
+ /* Quirk to ignore LOCAL for labels on HYPERV DIMMs */
+ if (nfit_mem->family == NVDIMM_FAMILY_HYPERV)
+ set_bit(NDD_NOBLK, &flags);
+
if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
@@ -2050,7 +2101,7 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
continue;
- dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n",
+ dev_err(acpi_desc->dev, "Error found in NVDIMM %s flags:%s%s%s%s%s\n",
nvdimm_name(nvdimm),
mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
@@ -2641,7 +2692,10 @@ static int ars_start(struct acpi_nfit_desc *acpi_desc,
if (rc < 0)
return rc;
- return cmd_rc;
+ if (cmd_rc < 0)
+ return cmd_rc;
+ set_bit(ARS_VALID, &acpi_desc->scrub_flags);
+ return 0;
}
static int ars_continue(struct acpi_nfit_desc *acpi_desc)
@@ -2651,11 +2705,11 @@ static int ars_continue(struct acpi_nfit_desc *acpi_desc)
struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
- memset(&ars_start, 0, sizeof(ars_start));
- ars_start.address = ars_status->restart_address;
- ars_start.length = ars_status->restart_length;
- ars_start.type = ars_status->type;
- ars_start.flags = acpi_desc->ars_start_flags;
+ ars_start = (struct nd_cmd_ars_start) {
+ .address = ars_status->restart_address,
+ .length = ars_status->restart_length,
+ .type = ars_status->type,
+ };
rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
sizeof(ars_start), &cmd_rc);
if (rc < 0)
@@ -2734,6 +2788,17 @@ static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
*/
if (ars_status->out_length < 44)
return 0;
+
+ /*
+ * Ignore potentially stale results that are only refreshed
+ * after a start-ARS event.
+ */
+ if (!test_and_clear_bit(ARS_VALID, &acpi_desc->scrub_flags)) {
+ dev_dbg(acpi_desc->dev, "skip %d stale records\n",
+ ars_status->num_records);
+ return 0;
+ }
+
for (i = 0; i < ars_status->num_records; i++) {
/* only process full records */
if (ars_status->out_length
@@ -3004,14 +3069,16 @@ static int ars_register(struct acpi_nfit_desc *acpi_desc,
{
int rc;
- if (no_init_ars || test_bit(ARS_FAILED, &nfit_spa->ars_state))
+ if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
return acpi_nfit_register_region(acpi_desc, nfit_spa);
set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
- set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
+ if (!no_init_ars)
+ set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
switch (acpi_nfit_query_poison(acpi_desc)) {
case 0:
+ case -ENOSPC:
case -EAGAIN:
rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
/* shouldn't happen, try again later */
@@ -3036,7 +3103,6 @@ static int ars_register(struct acpi_nfit_desc *acpi_desc,
break;
case -EBUSY:
case -ENOMEM:
- case -ENOSPC:
/*
* BIOS was using ARS, wait for it to complete (or
* resources to become available) and then perform our
@@ -3071,7 +3137,7 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
lockdep_assert_held(&acpi_desc->init_mutex);
- if (acpi_desc->cancel)
+ if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags))
return 0;
if (query_rc == -EBUSY) {
@@ -3145,7 +3211,7 @@ static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
{
lockdep_assert_held(&acpi_desc->init_mutex);
- acpi_desc->scrub_busy = 1;
+ set_bit(ARS_BUSY, &acpi_desc->scrub_flags);
/* note this should only be set from within the workqueue */
if (tmo)
acpi_desc->scrub_tmo = tmo;
@@ -3161,7 +3227,7 @@ static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
{
lockdep_assert_held(&acpi_desc->init_mutex);
- acpi_desc->scrub_busy = 0;
+ clear_bit(ARS_BUSY, &acpi_desc->scrub_flags);
acpi_desc->scrub_count++;
if (acpi_desc->scrub_count_state)
sysfs_notify_dirent(acpi_desc->scrub_count_state);
@@ -3182,6 +3248,7 @@ static void acpi_nfit_scrub(struct work_struct *work)
else
notify_ars_done(acpi_desc);
memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
+ clear_bit(ARS_POLL, &acpi_desc->scrub_flags);
mutex_unlock(&acpi_desc->init_mutex);
}
@@ -3216,6 +3283,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
struct nfit_spa *nfit_spa;
int rc;
+ set_bit(ARS_VALID, &acpi_desc->scrub_flags);
list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
switch (nfit_spa_type(nfit_spa->spa)) {
case NFIT_SPA_VOLATILE:
@@ -3450,7 +3518,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
struct nfit_spa *nfit_spa;
mutex_lock(&acpi_desc->init_mutex);
- if (acpi_desc->cancel) {
+ if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) {
mutex_unlock(&acpi_desc->init_mutex);
return 0;
}
@@ -3529,7 +3597,7 @@ void acpi_nfit_shutdown(void *data)
mutex_unlock(&acpi_desc_lock);
mutex_lock(&acpi_desc->init_mutex);
- acpi_desc->cancel = 1;
+ set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
cancel_delayed_work_sync(&acpi_desc->dwork);
mutex_unlock(&acpi_desc->init_mutex);
@@ -3729,6 +3797,7 @@ static __init int nfit_init(void)
guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
+ guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]);
nfit_wq = create_singlethread_workqueue("nfit");
if (!nfit_wq)
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 33691aecfcee..2f8cf2a11e3b 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -34,11 +34,14 @@
/* https://msdn.microsoft.com/library/windows/hardware/mt604741 */
#define UUID_NFIT_DIMM_N_MSFT "1ee68b36-d4bd-4a1a-9a16-4f8e53d46e05"
+/* http://www.uefi.org/RFIC_LIST (see "Virtual NVDIMM 0x1901") */
+#define UUID_NFIT_DIMM_N_HYPERV "5746c5f2-a9a2-4264-ad0e-e4ddc9e09e80"
+
#define ACPI_NFIT_MEM_FAILED_MASK (ACPI_NFIT_MEM_SAVE_FAILED \
| ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \
| ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED)
-#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_MSFT
+#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_HYPERV
#define NVDIMM_STANDARD_CMDMASK \
(1 << ND_CMD_SMART | 1 << ND_CMD_SMART_THRESHOLD | 1 << ND_CMD_DIMM_FLAGS \
@@ -94,6 +97,7 @@ enum nfit_uuids {
NFIT_DEV_DIMM_N_HPE1 = NVDIMM_FAMILY_HPE1,
NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2,
NFIT_DEV_DIMM_N_MSFT = NVDIMM_FAMILY_MSFT,
+ NFIT_DEV_DIMM_N_HYPERV = NVDIMM_FAMILY_HYPERV,
NFIT_SPA_VOLATILE,
NFIT_SPA_PM,
NFIT_SPA_DCR,
@@ -210,6 +214,13 @@ struct nfit_mem {
int family;
};
+enum scrub_flags {
+ ARS_BUSY,
+ ARS_CANCEL,
+ ARS_VALID,
+ ARS_POLL,
+};
+
struct acpi_nfit_desc {
struct nvdimm_bus_descriptor nd_desc;
struct acpi_table_header acpi_header;
@@ -223,7 +234,6 @@ struct acpi_nfit_desc {
struct list_head idts;
struct nvdimm_bus *nvdimm_bus;
struct device *dev;
- u8 ars_start_flags;
struct nd_cmd_ars_status *ars_status;
struct nfit_spa *scrub_spa;
struct delayed_work dwork;
@@ -232,8 +242,7 @@ struct acpi_nfit_desc {
unsigned int max_ars;
unsigned int scrub_count;
unsigned int scrub_mode;
- unsigned int scrub_busy:1;
- unsigned int cancel:1;
+ unsigned long scrub_flags;
unsigned long dimm_cmd_force_en;
unsigned long bus_cmd_force_en;
unsigned long bus_nfit_cmd_force_en;
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index ad31c50de3be..065c4fc245d1 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -209,6 +209,9 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
struct acpi_pptt_processor *cpu_node;
u32 proc_sz;
+ if (table_hdr->revision > 1)
+ return (node->flags & ACPI_PPTT_ACPI_LEAF_NODE);
+
table_end = (unsigned long)table_hdr + table_hdr->length;
node_entry = ACPI_PTR_DIFF(node, table_hdr);
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 41324f0b1bee..fa76f5e41b5c 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -648,26 +648,29 @@ static void acpi_global_event_handler(u32 event_type, acpi_handle device,
}
}
-static int get_status(u32 index, acpi_event_status *status,
+static int get_status(u32 index, acpi_event_status *ret,
acpi_handle *handle)
{
- int result;
+ acpi_status status;
if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
return -EINVAL;
if (index < num_gpes) {
- result = acpi_get_gpe_device(index, handle);
- if (result) {
+ status = acpi_get_gpe_device(index, handle);
+ if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
"Invalid GPE 0x%x", index));
- return result;
+ return -ENXIO;
}
- result = acpi_get_gpe_status(*handle, index, status);
- } else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
- result = acpi_get_event_status(index - num_gpes, status);
+ status = acpi_get_gpe_status(*handle, index, ret);
+ } else {
+ status = acpi_get_event_status(index - num_gpes, ret);
+ }
+ if (ACPI_FAILURE(status))
+ return -EIO;
- return result;
+ return 0;
}
static ssize_t counter_show(struct kobject *kobj,
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 2c334c01fc43..76c9969b7124 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -6,6 +6,8 @@
* This file is released under the GPLv2.
*/
+#define pr_fmt(fmt) "PM: " fmt
+
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
@@ -457,19 +459,19 @@ static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
time_start = ktime_get();
ret = genpd->power_off(genpd);
- if (ret == -EBUSY)
+ if (ret)
return ret;
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
- return ret;
+ return 0;
genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
genpd->max_off_time_changed = true;
pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
genpd->name, "off", elapsed_ns);
- return ret;
+ return 0;
}
/**
@@ -1657,8 +1659,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
- pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
- subdomain->name);
+ pr_warn("%s: unable to remove subdomain %s\n",
+ genpd->name, subdomain->name);
ret = -EBUSY;
goto out;
}
@@ -1766,8 +1768,8 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
ret = genpd_set_default_power_state(genpd);
if (ret)
return ret;
- } else if (!gov) {
- pr_warn("%s : no governor for states\n", genpd->name);
+ } else if (!gov && genpd->state_count > 1) {
+ pr_warn("%s: no governor for states\n", genpd->name);
}
device_initialize(&genpd->dev);
@@ -2514,7 +2516,7 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
&entry_latency);
if (err) {
pr_debug(" * %pOF missing entry-latency-us property\n",
- state_node);
+ state_node);
return -EINVAL;
}
@@ -2522,7 +2524,7 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
&exit_latency);
if (err) {
pr_debug(" * %pOF missing exit-latency-us property\n",
- state_node);
+ state_node);
return -EINVAL;
}
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 99896fbf18e4..4d07e38a8247 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -128,7 +128,6 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
off_on_time_ns = genpd->states[state].power_off_latency_ns +
genpd->states[state].power_on_latency_ns;
-
min_off_time_ns = -1;
/*
* Check if subdomains can be off for enough time.
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 5a8149829ab3..f80d298de3fa 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -17,6 +17,8 @@
* subsystem list maintains.
*/
+#define pr_fmt(fmt) "PM: " fmt
+
#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
@@ -128,7 +130,7 @@ void device_pm_add(struct device *dev)
if (device_pm_not_required(dev))
return;
- pr_debug("PM: Adding info for %s:%s\n",
+ pr_debug("Adding info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
device_pm_check_callbacks(dev);
mutex_lock(&dpm_list_mtx);
@@ -149,7 +151,7 @@ void device_pm_remove(struct device *dev)
if (device_pm_not_required(dev))
return;
- pr_debug("PM: Removing info for %s:%s\n",
+ pr_debug("Removing info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
@@ -168,7 +170,7 @@ void device_pm_remove(struct device *dev)
*/
void device_pm_move_before(struct device *deva, struct device *devb)
{
- pr_debug("PM: Moving %s:%s before %s:%s\n",
+ pr_debug("Moving %s:%s before %s:%s\n",
deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
/* Delete deva from dpm_list and reinsert before devb. */
@@ -182,7 +184,7 @@ void device_pm_move_before(struct device *deva, struct device *devb)
*/
void device_pm_move_after(struct device *deva, struct device *devb)
{
- pr_debug("PM: Moving %s:%s after %s:%s\n",
+ pr_debug("Moving %s:%s after %s:%s\n",
deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
/* Delete deva from dpm_list and reinsert after devb. */
@@ -195,7 +197,7 @@ void device_pm_move_after(struct device *deva, struct device *devb)
*/
void device_pm_move_last(struct device *dev)
{
- pr_debug("PM: Moving %s:%s to end of list\n",
+ pr_debug("Moving %s:%s to end of list\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
list_move_tail(&dev->power.entry, &dpm_list);
}
@@ -418,8 +420,8 @@ static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
int error)
{
- printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
- dev_name(dev), pm_verb(state.event), info, error);
+ pr_err("Device %s failed to %s%s: error %d\n",
+ dev_name(dev), pm_verb(state.event), info, error);
}
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
@@ -2022,8 +2024,7 @@ int dpm_prepare(pm_message_t state)
error = 0;
continue;
}
- printk(KERN_INFO "PM: Device %s not prepared "
- "for power transition: code %d\n",
+ pr_info("Device %s not prepared for power transition: code %d\n",
dev_name(dev), error);
put_device(dev);
break;
@@ -2062,7 +2063,7 @@ EXPORT_SYMBOL_GPL(dpm_suspend_start);
void __suspend_report_result(const char *function, void *fn, int ret)
{
if (ret)
- printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
+ pr_err("%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index c511def48b48..ec33fbdb919b 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -21,6 +21,7 @@ static inline void pm_runtime_early_init(struct device *dev)
extern void pm_runtime_init(struct device *dev);
extern void pm_runtime_reinit(struct device *dev);
extern void pm_runtime_remove(struct device *dev);
+extern u64 pm_runtime_active_time(struct device *dev);
#define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0)
#define WAKE_IRQ_DEDICATED_MANAGED BIT(1)
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 3382542b39b7..f80e402ef778 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -22,7 +22,7 @@
* per-device constraint data struct.
*
* Note about the per-device constraint data struct allocation:
- * . The per-device constraints data struct ptr is tored into the device
+ * . The per-device constraints data struct ptr is stored into the device
* dev_pm_info.
* . To minimize the data usage by the per-device constraints, the data struct
* is only allocated at the first call to dev_pm_qos_add_request.
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index a80dbf08a99c..977db40378b0 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -64,7 +64,7 @@ static int rpm_suspend(struct device *dev, int rpmflags);
* runtime_status field is updated, to account the time in the old state
* correctly.
*/
-void update_pm_runtime_accounting(struct device *dev)
+static void update_pm_runtime_accounting(struct device *dev)
{
u64 now, last, delta;
@@ -98,7 +98,7 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status)
dev->power.runtime_status = status;
}
-u64 pm_runtime_suspended_time(struct device *dev)
+static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
{
u64 time;
unsigned long flags;
@@ -106,12 +106,22 @@ u64 pm_runtime_suspended_time(struct device *dev)
spin_lock_irqsave(&dev->power.lock, flags);
update_pm_runtime_accounting(dev);
- time = dev->power.suspended_time;
+ time = suspended ? dev->power.suspended_time : dev->power.active_time;
spin_unlock_irqrestore(&dev->power.lock, flags);
return time;
}
+
+u64 pm_runtime_active_time(struct device *dev)
+{
+ return rpm_get_accounted_time(dev, false);
+}
+
+u64 pm_runtime_suspended_time(struct device *dev)
+{
+ return rpm_get_accounted_time(dev, true);
+}
EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
/**
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index c6bf76124184..1226e441ddfe 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -125,13 +125,9 @@ static ssize_t runtime_active_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
- u64 tmp;
- spin_lock_irq(&dev->power.lock);
- update_pm_runtime_accounting(dev);
- tmp = dev->power.active_time;
+ u64 tmp = pm_runtime_active_time(dev);
do_div(tmp, NSEC_PER_MSEC);
ret = sprintf(buf, "%llu\n", tmp);
- spin_unlock_irq(&dev->power.lock);
return ret;
}
@@ -141,13 +137,9 @@ static ssize_t runtime_suspended_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
- u64 tmp;
- spin_lock_irq(&dev->power.lock);
- update_pm_runtime_accounting(dev);
- tmp = dev->power.suspended_time;
+ u64 tmp = pm_runtime_suspended_time(dev);
do_div(tmp, NSEC_PER_MSEC);
ret = sprintf(buf, "%llu\n", tmp);
- spin_unlock_irq(&dev->power.lock);
return ret;
}
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index b11f47a1e819..2bd9d2c744ca 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -7,6 +7,8 @@
* devices may be working.
*/
+#define pr_fmt(fmt) "PM: " fmt
+
#include <linux/pm-trace.h>
#include <linux/export.h>
#include <linux/rtc.h>
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index f1fee72ed970..bb1ae175fae1 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -6,6 +6,8 @@
* This file is released under the GPLv2.
*/
+#define pr_fmt(fmt) "PM: " fmt
+
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
@@ -106,23 +108,6 @@ struct wakeup_source *wakeup_source_create(const char *name)
}
EXPORT_SYMBOL_GPL(wakeup_source_create);
-/**
- * wakeup_source_drop - Prepare a struct wakeup_source object for destruction.
- * @ws: Wakeup source to prepare for destruction.
- *
- * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never
- * be run in parallel with this function for the same wakeup source object.
- */
-void wakeup_source_drop(struct wakeup_source *ws)
-{
- if (!ws)
- return;
-
- del_timer_sync(&ws->timer);
- __pm_relax(ws);
-}
-EXPORT_SYMBOL_GPL(wakeup_source_drop);
-
/*
* Record wakeup_source statistics being deleted into a dummy wakeup_source.
*/
@@ -162,7 +147,7 @@ void wakeup_source_destroy(struct wakeup_source *ws)
if (!ws)
return;
- wakeup_source_drop(ws);
+ __pm_relax(ws);
wakeup_source_record(ws);
kfree_const(ws->name);
kfree(ws);
@@ -205,6 +190,13 @@ void wakeup_source_remove(struct wakeup_source *ws)
list_del_rcu(&ws->entry);
raw_spin_unlock_irqrestore(&events_lock, flags);
synchronize_srcu(&wakeup_srcu);
+
+ del_timer_sync(&ws->timer);
+ /*
+ * Clear timer.function to make wakeup_source_not_registered() treat
+ * this wakeup source as not registered.
+ */
+ ws->timer.function = NULL;
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);
@@ -853,7 +845,7 @@ bool pm_wakeup_pending(void)
raw_spin_unlock_irqrestore(&events_lock, flags);
if (ret) {
- pr_debug("PM: Wakeup pending, aborting suspend\n");
+ pr_debug("Wakeup pending, aborting suspend\n");
pm_print_active_wakeup_sources();
}
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index d2f0bb5ba47e..e705aab9e38b 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -290,6 +290,12 @@ config COMMON_CLK_BD718XX
This driver supports ROHM BD71837 and ROHM BD71847
PMICs clock gates.
+config COMMON_CLK_FIXED_MMIO
+ bool "Clock driver for Memory Mapped Fixed values"
+ depends on COMMON_CLK && OF
+ help
+ Support for Memory Mapped IO Fixed clocks
+
source "drivers/clk/actions/Kconfig"
source "drivers/clk/bcm/Kconfig"
source "drivers/clk/hisilicon/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 8a9440a97500..1db133652f0c 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o
obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o
obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o
obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
+obj-$(CONFIG_COMMON_CLK_FIXED_MMIO) += clk-fixed-mmio.o
obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o
obj-$(CONFIG_COMMON_CLK_ASPEED) += clk-aspeed.o
obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
@@ -78,7 +79,7 @@ obj-$(CONFIG_ARCH_K3) += keystone/
obj-$(CONFIG_ARCH_KEYSTONE) += keystone/
obj-$(CONFIG_MACH_LOONGSON32) += loongson1/
obj-y += mediatek/
-obj-$(CONFIG_COMMON_CLK_AMLOGIC) += meson/
+obj-$(CONFIG_ARCH_MESON) += meson/
obj-$(CONFIG_MACH_PIC32) += microchip/
ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_ARCH_MMP) += mmp/
diff --git a/drivers/clk/actions/Kconfig b/drivers/clk/actions/Kconfig
index 04f0a6355726..5b45ca35757e 100644
--- a/drivers/clk/actions/Kconfig
+++ b/drivers/clk/actions/Kconfig
@@ -9,6 +9,11 @@ if CLK_ACTIONS
# SoC Drivers
+config CLK_OWL_S500
+ bool "Support for the Actions Semi OWL S500 clocks"
+ depends on ARCH_ACTIONS || COMPILE_TEST
+ default ARCH_ACTIONS
+
config CLK_OWL_S700
bool "Support for the Actions Semi OWL S700 clocks"
depends on (ARM64 && ARCH_ACTIONS) || COMPILE_TEST
diff --git a/drivers/clk/actions/Makefile b/drivers/clk/actions/Makefile
index ccfdf9781cef..a2588e55c790 100644
--- a/drivers/clk/actions/Makefile
+++ b/drivers/clk/actions/Makefile
@@ -10,5 +10,6 @@ clk-owl-y += owl-pll.o
clk-owl-y += owl-reset.o
# SoC support
+obj-$(CONFIG_CLK_OWL_S500) += owl-s500.o
obj-$(CONFIG_CLK_OWL_S700) += owl-s700.o
obj-$(CONFIG_CLK_OWL_S900) += owl-s900.o
diff --git a/drivers/clk/actions/owl-pll.c b/drivers/clk/actions/owl-pll.c
index 058e06d7099f..02437bdedf4d 100644
--- a/drivers/clk/actions/owl-pll.c
+++ b/drivers/clk/actions/owl-pll.c
@@ -179,7 +179,7 @@ static int owl_pll_set_rate(struct clk_hw *hw, unsigned long rate,
regmap_write(common->regmap, pll_hw->reg, reg);
- udelay(PLL_STABILITY_WAIT_US);
+ udelay(pll_hw->delay);
return 0;
}
diff --git a/drivers/clk/actions/owl-pll.h b/drivers/clk/actions/owl-pll.h
index 0aae30abd5dc..6fb0d45bb088 100644
--- a/drivers/clk/actions/owl-pll.h
+++ b/drivers/clk/actions/owl-pll.h
@@ -13,6 +13,8 @@
#include "owl-common.h"
+#define OWL_PLL_DEF_DELAY 50
+
/* last entry should have rate = 0 */
struct clk_pll_table {
unsigned int val;
@@ -27,6 +29,7 @@ struct owl_pll_hw {
u8 width;
u8 min_mul;
u8 max_mul;
+ u8 delay;
const struct clk_pll_table *table;
};
@@ -36,7 +39,7 @@ struct owl_pll {
};
#define OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \
- _width, _min_mul, _max_mul, _table) \
+ _width, _min_mul, _max_mul, _delay, _table) \
{ \
.reg = _reg, \
.bfreq = _bfreq, \
@@ -45,6 +48,7 @@ struct owl_pll {
.width = _width, \
.min_mul = _min_mul, \
.max_mul = _max_mul, \
+ .delay = _delay, \
.table = _table, \
}
@@ -52,8 +56,8 @@ struct owl_pll {
_shift, _width, _min_mul, _max_mul, _table, _flags) \
struct owl_pll _struct = { \
.pll_hw = OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \
- _width, _min_mul, \
- _max_mul, _table), \
+ _width, _min_mul, _max_mul, \
+ OWL_PLL_DEF_DELAY, _table), \
.common = { \
.regmap = NULL, \
.hw.init = CLK_HW_INIT(_name, \
@@ -67,8 +71,23 @@ struct owl_pll {
_shift, _width, _min_mul, _max_mul, _table, _flags) \
struct owl_pll _struct = { \
.pll_hw = OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \
- _width, _min_mul, \
- _max_mul, _table), \
+ _width, _min_mul, _max_mul, \
+ OWL_PLL_DEF_DELAY, _table), \
+ .common = { \
+ .regmap = NULL, \
+ .hw.init = CLK_HW_INIT_NO_PARENT(_name, \
+ &owl_pll_ops, \
+ _flags), \
+ }, \
+ }
+
+#define OWL_PLL_NO_PARENT_DELAY(_struct, _name, _reg, _bfreq, _bit_idx, \
+ _shift, _width, _min_mul, _max_mul, _delay, _table, \
+ _flags) \
+ struct owl_pll _struct = { \
+ .pll_hw = OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \
+ _width, _min_mul, _max_mul, \
+ _delay, _table), \
.common = { \
.regmap = NULL, \
.hw.init = CLK_HW_INIT_NO_PARENT(_name, \
@@ -78,7 +97,6 @@ struct owl_pll {
}
#define mul_mask(m) ((1 << ((m)->width)) - 1)
-#define PLL_STABILITY_WAIT_US (50)
static inline struct owl_pll *hw_to_owl_pll(const struct clk_hw *hw)
{
diff --git a/drivers/clk/actions/owl-s500.c b/drivers/clk/actions/owl-s500.c
new file mode 100644
index 000000000000..e2007ac4d235
--- /dev/null
+++ b/drivers/clk/actions/owl-s500.c
@@ -0,0 +1,525 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Actions Semi Owl S500 SoC clock driver
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Author: David Liu <liuwei@actions-semi.com>
+ *
+ * Copyright (c) 2018 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ *
+ * Copyright (c) 2018 LSI-TEC - Caninos Loucos
+ * Author: Edgar Bernardi Righi <edgar.righi@lsitec.org.br>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "owl-common.h"
+#include "owl-composite.h"
+#include "owl-divider.h"
+#include "owl-factor.h"
+#include "owl-fixed-factor.h"
+#include "owl-gate.h"
+#include "owl-mux.h"
+#include "owl-pll.h"
+
+#include <dt-bindings/clock/actions,s500-cmu.h>
+
+#define CMU_COREPLL (0x0000)
+#define CMU_DEVPLL (0x0004)
+#define CMU_DDRPLL (0x0008)
+#define CMU_NANDPLL (0x000C)
+#define CMU_DISPLAYPLL (0x0010)
+#define CMU_AUDIOPLL (0x0014)
+#define CMU_TVOUTPLL (0x0018)
+#define CMU_BUSCLK (0x001C)
+#define CMU_SENSORCLK (0x0020)
+#define CMU_LCDCLK (0x0024)
+#define CMU_DSICLK (0x0028)
+#define CMU_CSICLK (0x002C)
+#define CMU_DECLK (0x0030)
+#define CMU_BISPCLK (0x0034)
+#define CMU_BUSCLK1 (0x0038)
+#define CMU_VDECLK (0x0040)
+#define CMU_VCECLK (0x0044)
+#define CMU_NANDCCLK (0x004C)
+#define CMU_SD0CLK (0x0050)
+#define CMU_SD1CLK (0x0054)
+#define CMU_SD2CLK (0x0058)
+#define CMU_UART0CLK (0x005C)
+#define CMU_UART1CLK (0x0060)
+#define CMU_UART2CLK (0x0064)
+#define CMU_PWM4CLK (0x0068)
+#define CMU_PWM5CLK (0x006C)
+#define CMU_PWM0CLK (0x0070)
+#define CMU_PWM1CLK (0x0074)
+#define CMU_PWM2CLK (0x0078)
+#define CMU_PWM3CLK (0x007C)
+#define CMU_USBPLL (0x0080)
+#define CMU_ETHERNETPLL (0x0084)
+#define CMU_CVBSPLL (0x0088)
+#define CMU_LENSCLK (0x008C)
+#define CMU_GPU3DCLK (0x0090)
+#define CMU_CORECTL (0x009C)
+#define CMU_DEVCLKEN0 (0x00A0)
+#define CMU_DEVCLKEN1 (0x00A4)
+#define CMU_DEVRST0 (0x00A8)
+#define CMU_DEVRST1 (0x00AC)
+#define CMU_UART3CLK (0x00B0)
+#define CMU_UART4CLK (0x00B4)
+#define CMU_UART5CLK (0x00B8)
+#define CMU_UART6CLK (0x00BC)
+#define CMU_SSCLK (0x00C0)
+#define CMU_DIGITALDEBUG (0x00D0)
+#define CMU_ANALOGDEBUG (0x00D4)
+#define CMU_COREPLLDEBUG (0x00D8)
+#define CMU_DEVPLLDEBUG (0x00DC)
+#define CMU_DDRPLLDEBUG (0x00E0)
+#define CMU_NANDPLLDEBUG (0x00E4)
+#define CMU_DISPLAYPLLDEBUG (0x00E8)
+#define CMU_TVOUTPLLDEBUG (0x00EC)
+#define CMU_DEEPCOLORPLLDEBUG (0x00F4)
+#define CMU_AUDIOPLL_ETHPLLDEBUG (0x00F8)
+#define CMU_CVBSPLLDEBUG (0x00FC)
+
+#define OWL_S500_COREPLL_DELAY (150)
+#define OWL_S500_DDRPLL_DELAY (63)
+#define OWL_S500_DEVPLL_DELAY (28)
+#define OWL_S500_NANDPLL_DELAY (44)
+#define OWL_S500_DISPLAYPLL_DELAY (57)
+#define OWL_S500_ETHERNETPLL_DELAY (25)
+#define OWL_S500_AUDIOPLL_DELAY (100)
+
+static const struct clk_pll_table clk_audio_pll_table[] = {
+ { 0, 45158400 }, { 1, 49152000 },
+ { 0, 0 },
+};
+
+/* pll clocks */
+static OWL_PLL_NO_PARENT_DELAY(ethernet_pll_clk, "ethernet_pll_clk", CMU_ETHERNETPLL, 500000000, 0, 0, 0, 0, 0, OWL_S500_ETHERNETPLL_DELAY, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT_DELAY(core_pll_clk, "core_pll_clk", CMU_COREPLL, 12000000, 9, 0, 8, 4, 134, OWL_S500_COREPLL_DELAY, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT_DELAY(ddr_pll_clk, "ddr_pll_clk", CMU_DDRPLL, 12000000, 8, 0, 8, 1, 67, OWL_S500_DDRPLL_DELAY, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT_DELAY(nand_pll_clk, "nand_pll_clk", CMU_NANDPLL, 6000000, 8, 0, 7, 2, 86, OWL_S500_NANDPLL_DELAY, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT_DELAY(display_pll_clk, "display_pll_clk", CMU_DISPLAYPLL, 6000000, 8, 0, 8, 2, 126, OWL_S500_DISPLAYPLL_DELAY, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT_DELAY(dev_pll_clk, "dev_pll_clk", CMU_DEVPLL, 6000000, 8, 0, 7, 8, 126, OWL_S500_DEVPLL_DELAY, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT_DELAY(audio_pll_clk, "audio_pll_clk", CMU_AUDIOPLL, 0, 4, 0, 1, 0, 0, OWL_S500_AUDIOPLL_DELAY, clk_audio_pll_table, CLK_IGNORE_UNUSED);
+
+static const char * const dev_clk_mux_p[] = { "hosc", "dev_pll_clk" };
+static const char * const bisp_clk_mux_p[] = { "display_pll_clk", "dev_clk" };
+static const char * const sensor_clk_mux_p[] = { "hosc", "bisp_clk" };
+static const char * const sd_clk_mux_p[] = { "dev_clk", "nand_pll_clk" };
+static const char * const pwm_clk_mux_p[] = { "losc", "hosc" };
+static const char * const ahbprediv_clk_mux_p[] = { "dev_clk", "display_pll_clk", "nand_pll_clk", "ddr_pll_clk" };
+static const char * const uart_clk_mux_p[] = { "hosc", "dev_pll_clk" };
+static const char * const de_clk_mux_p[] = { "display_pll_clk", "dev_clk" };
+static const char * const i2s_clk_mux_p[] = { "audio_pll_clk" };
+static const char * const hde_clk_mux_p[] = { "dev_clk", "display_pll_clk", "nand_pll_clk", "ddr_pll_clk" };
+static const char * const nand_clk_mux_p[] = { "nand_pll_clk", "display_pll_clk", "dev_clk", "ddr_pll_clk" };
+
+static struct clk_factor_table sd_factor_table[] = {
+ /* bit0 ~ 4 */
+ { 0, 1, 1 }, { 1, 1, 2 }, { 2, 1, 3 }, { 3, 1, 4 },
+ { 4, 1, 5 }, { 5, 1, 6 }, { 6, 1, 7 }, { 7, 1, 8 },
+ { 8, 1, 9 }, { 9, 1, 10 }, { 10, 1, 11 }, { 11, 1, 12 },
+ { 12, 1, 13 }, { 13, 1, 14 }, { 14, 1, 15 }, { 15, 1, 16 },
+ { 16, 1, 17 }, { 17, 1, 18 }, { 18, 1, 19 }, { 19, 1, 20 },
+ { 20, 1, 21 }, { 21, 1, 22 }, { 22, 1, 23 }, { 23, 1, 24 },
+ { 24, 1, 25 }, { 25, 1, 26 }, { 26, 1, 27 }, { 27, 1, 28 },
+ { 28, 1, 29 }, { 29, 1, 30 }, { 30, 1, 31 }, { 31, 1, 32 },
+
+ /* bit8: /128 */
+ { 256, 1, 1 * 128 }, { 257, 1, 2 * 128 }, { 258, 1, 3 * 128 }, { 259, 1, 4 * 128 },
+ { 260, 1, 5 * 128 }, { 261, 1, 6 * 128 }, { 262, 1, 7 * 128 }, { 263, 1, 8 * 128 },
+ { 264, 1, 9 * 128 }, { 265, 1, 10 * 128 }, { 266, 1, 11 * 128 }, { 267, 1, 12 * 128 },
+ { 268, 1, 13 * 128 }, { 269, 1, 14 * 128 }, { 270, 1, 15 * 128 }, { 271, 1, 16 * 128 },
+ { 272, 1, 17 * 128 }, { 273, 1, 18 * 128 }, { 274, 1, 19 * 128 }, { 275, 1, 20 * 128 },
+ { 276, 1, 21 * 128 }, { 277, 1, 22 * 128 }, { 278, 1, 23 * 128 }, { 279, 1, 24 * 128 },
+ { 280, 1, 25 * 128 }, { 281, 1, 26 * 128 }, { 282, 1, 27 * 128 }, { 283, 1, 28 * 128 },
+ { 284, 1, 29 * 128 }, { 285, 1, 30 * 128 }, { 286, 1, 31 * 128 }, { 287, 1, 32 * 128 },
+ { 0, 0, 0 },
+};
+
+static struct clk_factor_table bisp_factor_table[] = {
+ { 0, 1, 1 }, { 1, 1, 2 }, { 2, 1, 3 }, { 3, 1, 4 },
+ { 4, 1, 5 }, { 5, 1, 6 }, { 6, 1, 7 }, { 7, 1, 8 },
+ { 0, 0, 0 },
+};
+
+static struct clk_factor_table ahb_factor_table[] = {
+ { 1, 1, 2 }, { 2, 1, 3 },
+ { 0, 0, 0 },
+};
+
+static struct clk_div_table rmii_ref_div_table[] = {
+ { 0, 4 }, { 1, 10 },
+ { 0, 0 },
+};
+
+static struct clk_div_table i2s_div_table[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
+ { 4, 6 }, { 5, 8 }, { 6, 12 }, { 7, 16 },
+ { 8, 24 },
+ { 0, 0 },
+};
+
+static struct clk_div_table nand_div_table[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 6 },
+ { 4, 8 }, { 5, 10 }, { 6, 12 }, { 7, 14 },
+ { 8, 16 }, { 9, 18 }, { 10, 20 }, { 11, 22 },
+ { 0, 0 },
+};
+
+/* mux clock */
+static OWL_MUX(dev_clk, "dev_clk", dev_clk_mux_p, CMU_DEVPLL, 12, 1, CLK_SET_RATE_PARENT);
+static OWL_MUX(ahbprediv_clk, "ahbprediv_clk", ahbprediv_clk_mux_p, CMU_BUSCLK1, 8, 3, CLK_SET_RATE_PARENT);
+
+/* gate clocks */
+static OWL_GATE(spi0_clk, "spi0_clk", "ahb_clk", CMU_DEVCLKEN1, 10, 0, CLK_IGNORE_UNUSED);
+static OWL_GATE(spi1_clk, "spi1_clk", "ahb_clk", CMU_DEVCLKEN1, 11, 0, CLK_IGNORE_UNUSED);
+static OWL_GATE(spi2_clk, "spi2_clk", "ahb_clk", CMU_DEVCLKEN1, 12, 0, CLK_IGNORE_UNUSED);
+static OWL_GATE(spi3_clk, "spi3_clk", "ahb_clk", CMU_DEVCLKEN1, 13, 0, CLK_IGNORE_UNUSED);
+static OWL_GATE(timer_clk, "timer_clk", "hosc", CMU_DEVCLKEN1, 27, 0, 0);
+static OWL_GATE(hdmi_clk, "hdmi_clk", "hosc", CMU_DEVCLKEN1, 3, 0, 0);
+
+/* divider clocks */
+static OWL_DIVIDER(h_clk, "h_clk", "ahbprediv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0);
+static OWL_DIVIDER(rmii_ref_clk, "rmii_ref_clk", "ethernet_pll_clk", CMU_ETHERNETPLL, 1, 1, rmii_ref_div_table, 0, 0);
+
+/* factor clocks */
+static OWL_FACTOR(ahb_clk, "ahb_clk", "h_clk", CMU_BUSCLK1, 2, 2, ahb_factor_table, 0, 0);
+static OWL_FACTOR(de1_clk, "de_clk1", "de_clk", CMU_DECLK, 0, 3, bisp_factor_table, 0, 0);
+static OWL_FACTOR(de2_clk, "de_clk2", "de_clk", CMU_DECLK, 4, 3, bisp_factor_table, 0, 0);
+
+/* composite clocks */
+static OWL_COMP_FACTOR(vce_clk, "vce_clk", hde_clk_mux_p,
+ OWL_MUX_HW(CMU_VCECLK, 4, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 26, 0),
+ OWL_FACTOR_HW(CMU_VCECLK, 0, 3, 0, bisp_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(vde_clk, "vde_clk", hde_clk_mux_p,
+ OWL_MUX_HW(CMU_VDECLK, 4, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 25, 0),
+ OWL_FACTOR_HW(CMU_VDECLK, 0, 3, 0, bisp_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(bisp_clk, "bisp_clk", bisp_clk_mux_p,
+ OWL_MUX_HW(CMU_BISPCLK, 4, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
+ OWL_FACTOR_HW(CMU_BISPCLK, 0, 3, 0, bisp_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(sensor0_clk, "sensor0_clk", sensor_clk_mux_p,
+ OWL_MUX_HW(CMU_SENSORCLK, 4, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
+ OWL_FACTOR_HW(CMU_SENSORCLK, 0, 3, 0, bisp_factor_table),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_FACTOR(sensor1_clk, "sensor1_clk", sensor_clk_mux_p,
+ OWL_MUX_HW(CMU_SENSORCLK, 4, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
+ OWL_FACTOR_HW(CMU_SENSORCLK, 8, 3, 0, bisp_factor_table),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_FACTOR(sd0_clk, "sd0_clk", sd_clk_mux_p,
+ OWL_MUX_HW(CMU_SD0CLK, 9, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 5, 0),
+ OWL_FACTOR_HW(CMU_SD0CLK, 0, 9, 0, sd_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(sd1_clk, "sd1_clk", sd_clk_mux_p,
+ OWL_MUX_HW(CMU_SD1CLK, 9, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 6, 0),
+ OWL_FACTOR_HW(CMU_SD1CLK, 0, 9, 0, sd_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(sd2_clk, "sd2_clk", sd_clk_mux_p,
+ OWL_MUX_HW(CMU_SD2CLK, 9, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 7, 0),
+ OWL_FACTOR_HW(CMU_SD2CLK, 0, 9, 0, sd_factor_table),
+ 0);
+
+static OWL_COMP_DIV(pwm0_clk, "pwm0_clk", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM0CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 23, 0),
+ OWL_DIVIDER_HW(CMU_PWM0CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(pwm1_clk, "pwm1_clk", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM1CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 24, 0),
+ OWL_DIVIDER_HW(CMU_PWM1CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(pwm2_clk, "pwm2_clk", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM2CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 25, 0),
+ OWL_DIVIDER_HW(CMU_PWM2CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(pwm3_clk, "pwm3_clk", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM3CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 26, 0),
+ OWL_DIVIDER_HW(CMU_PWM3CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(pwm4_clk, "pwm4_clk", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM4CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 11, 0),
+ OWL_DIVIDER_HW(CMU_PWM4CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(pwm5_clk, "pwm5_clk", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM5CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 0, 0),
+ OWL_DIVIDER_HW(CMU_PWM5CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_PASS(de_clk, "de_clk", de_clk_mux_p,
+ OWL_MUX_HW(CMU_DECLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 8, 0),
+ 0);
+
+static OWL_COMP_FIXED_FACTOR(i2c0_clk, "i2c0_clk", "ethernet_pll_clk",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 14, 0),
+ 1, 5, 0);
+
+static OWL_COMP_FIXED_FACTOR(i2c1_clk, "i2c1_clk", "ethernet_pll_clk",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 15, 0),
+ 1, 5, 0);
+
+static OWL_COMP_FIXED_FACTOR(i2c2_clk, "i2c2_clk", "ethernet_pll_clk",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 30, 0),
+ 1, 5, 0);
+
+static OWL_COMP_FIXED_FACTOR(i2c3_clk, "i2c3_clk", "ethernet_pll_clk",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 31, 0),
+ 1, 5, 0);
+
+static OWL_COMP_DIV(uart0_clk, "uart0_clk", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART0CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 6, 0),
+ OWL_DIVIDER_HW(CMU_UART0CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_DIV(uart1_clk, "uart1_clk", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART1CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 7, 0),
+ OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_DIV(uart2_clk, "uart2_clk", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART2CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 8, 0),
+ OWL_DIVIDER_HW(CMU_UART2CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_DIV(uart3_clk, "uart3_clk", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART3CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 19, 0),
+ OWL_DIVIDER_HW(CMU_UART3CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_DIV(uart4_clk, "uart4_clk", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART4CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 20, 0),
+ OWL_DIVIDER_HW(CMU_UART4CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_DIV(uart5_clk, "uart5_clk", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART5CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 21, 0),
+ OWL_DIVIDER_HW(CMU_UART5CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_DIV(uart6_clk, "uart6_clk", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART6CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 18, 0),
+ OWL_DIVIDER_HW(CMU_UART6CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_DIV(i2srx_clk, "i2srx_clk", i2s_clk_mux_p,
+ OWL_MUX_HW(CMU_AUDIOPLL, 24, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 21, 0),
+ OWL_DIVIDER_HW(CMU_AUDIOPLL, 20, 4, 0, i2s_div_table),
+ 0);
+
+static OWL_COMP_DIV(i2stx_clk, "i2stx_clk", i2s_clk_mux_p,
+ OWL_MUX_HW(CMU_AUDIOPLL, 24, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 20, 0),
+ OWL_DIVIDER_HW(CMU_AUDIOPLL, 16, 4, 0, i2s_div_table),
+ 0);
+
+static OWL_COMP_DIV(hdmia_clk, "hdmia_clk", i2s_clk_mux_p,
+ OWL_MUX_HW(CMU_AUDIOPLL, 24, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 22, 0),
+ OWL_DIVIDER_HW(CMU_AUDIOPLL, 24, 4, 0, i2s_div_table),
+ 0);
+
+static OWL_COMP_DIV(spdif_clk, "spdif_clk", i2s_clk_mux_p,
+ OWL_MUX_HW(CMU_AUDIOPLL, 24, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 23, 0),
+ OWL_DIVIDER_HW(CMU_AUDIOPLL, 28, 4, 0, i2s_div_table),
+ 0);
+
+static OWL_COMP_DIV(nand_clk, "nand_clk", nand_clk_mux_p,
+ OWL_MUX_HW(CMU_NANDCCLK, 8, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 4, 0),
+ OWL_DIVIDER_HW(CMU_NANDCCLK, 0, 3, 0, nand_div_table),
+ CLK_SET_RATE_PARENT);
+
+static OWL_COMP_DIV(ecc_clk, "ecc_clk", nand_clk_mux_p,
+ OWL_MUX_HW(CMU_NANDCCLK, 8, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 4, 0),
+ OWL_DIVIDER_HW(CMU_NANDCCLK, 4, 3, 0, nand_div_table),
+ CLK_SET_RATE_PARENT);
+
+static struct owl_clk_common *s500_clks[] = {
+ &ethernet_pll_clk.common,
+ &core_pll_clk.common,
+ &ddr_pll_clk.common,
+ &dev_pll_clk.common,
+ &nand_pll_clk.common,
+ &audio_pll_clk.common,
+ &display_pll_clk.common,
+ &dev_clk.common,
+ &timer_clk.common,
+ &i2c0_clk.common,
+ &i2c1_clk.common,
+ &i2c2_clk.common,
+ &i2c3_clk.common,
+ &uart0_clk.common,
+ &uart1_clk.common,
+ &uart2_clk.common,
+ &uart3_clk.common,
+ &uart4_clk.common,
+ &uart5_clk.common,
+ &uart6_clk.common,
+ &pwm0_clk.common,
+ &pwm1_clk.common,
+ &pwm2_clk.common,
+ &pwm3_clk.common,
+ &pwm4_clk.common,
+ &pwm5_clk.common,
+ &sensor0_clk.common,
+ &sensor1_clk.common,
+ &sd0_clk.common,
+ &sd1_clk.common,
+ &sd2_clk.common,
+ &bisp_clk.common,
+ &ahb_clk.common,
+ &ahbprediv_clk.common,
+ &h_clk.common,
+ &spi0_clk.common,
+ &spi1_clk.common,
+ &spi2_clk.common,
+ &spi3_clk.common,
+ &rmii_ref_clk.common,
+ &de_clk.common,
+ &de1_clk.common,
+ &de2_clk.common,
+ &i2srx_clk.common,
+ &i2stx_clk.common,
+ &hdmia_clk.common,
+ &hdmi_clk.common,
+ &vce_clk.common,
+ &vde_clk.common,
+ &spdif_clk.common,
+ &nand_clk.common,
+ &ecc_clk.common,
+};
+
+static struct clk_hw_onecell_data s500_hw_clks = {
+ .hws = {
+ [CLK_ETHERNET_PLL] = &ethernet_pll_clk.common.hw,
+ [CLK_CORE_PLL] = &core_pll_clk.common.hw,
+ [CLK_DDR_PLL] = &ddr_pll_clk.common.hw,
+ [CLK_NAND_PLL] = &nand_pll_clk.common.hw,
+ [CLK_DISPLAY_PLL] = &display_pll_clk.common.hw,
+ [CLK_DEV_PLL] = &dev_pll_clk.common.hw,
+ [CLK_AUDIO_PLL] = &audio_pll_clk.common.hw,
+ [CLK_TIMER] = &timer_clk.common.hw,
+ [CLK_DEV] = &dev_clk.common.hw,
+ [CLK_DE] = &de_clk.common.hw,
+ [CLK_DE1] = &de1_clk.common.hw,
+ [CLK_DE2] = &de2_clk.common.hw,
+ [CLK_I2C0] = &i2c0_clk.common.hw,
+ [CLK_I2C1] = &i2c1_clk.common.hw,
+ [CLK_I2C2] = &i2c2_clk.common.hw,
+ [CLK_I2C3] = &i2c3_clk.common.hw,
+ [CLK_I2SRX] = &i2srx_clk.common.hw,
+ [CLK_I2STX] = &i2stx_clk.common.hw,
+ [CLK_UART0] = &uart0_clk.common.hw,
+ [CLK_UART1] = &uart1_clk.common.hw,
+ [CLK_UART2] = &uart2_clk.common.hw,
+ [CLK_UART3] = &uart3_clk.common.hw,
+ [CLK_UART4] = &uart4_clk.common.hw,
+ [CLK_UART5] = &uart5_clk.common.hw,
+ [CLK_UART6] = &uart6_clk.common.hw,
+ [CLK_PWM0] = &pwm0_clk.common.hw,
+ [CLK_PWM1] = &pwm1_clk.common.hw,
+ [CLK_PWM2] = &pwm2_clk.common.hw,
+ [CLK_PWM3] = &pwm3_clk.common.hw,
+ [CLK_PWM4] = &pwm4_clk.common.hw,
+ [CLK_PWM5] = &pwm5_clk.common.hw,
+ [CLK_SENSOR0] = &sensor0_clk.common.hw,
+ [CLK_SENSOR1] = &sensor1_clk.common.hw,
+ [CLK_SD0] = &sd0_clk.common.hw,
+ [CLK_SD1] = &sd1_clk.common.hw,
+ [CLK_SD2] = &sd2_clk.common.hw,
+ [CLK_BISP] = &bisp_clk.common.hw,
+ [CLK_SPI0] = &spi0_clk.common.hw,
+ [CLK_SPI1] = &spi1_clk.common.hw,
+ [CLK_SPI2] = &spi2_clk.common.hw,
+ [CLK_SPI3] = &spi3_clk.common.hw,
+ [CLK_AHB] = &ahb_clk.common.hw,
+ [CLK_H] = &h_clk.common.hw,
+ [CLK_AHBPREDIV] = &ahbprediv_clk.common.hw,
+ [CLK_RMII_REF] = &rmii_ref_clk.common.hw,
+ [CLK_HDMI_AUDIO] = &hdmia_clk.common.hw,
+ [CLK_HDMI] = &hdmi_clk.common.hw,
+ [CLK_VDE] = &vde_clk.common.hw,
+ [CLK_VCE] = &vce_clk.common.hw,
+ [CLK_SPDIF] = &spdif_clk.common.hw,
+ [CLK_NAND] = &nand_clk.common.hw,
+ [CLK_ECC] = &ecc_clk.common.hw,
+ },
+ .num = CLK_NR_CLKS,
+};
+
+static struct owl_clk_desc s500_clk_desc = {
+ .clks = s500_clks,
+ .num_clks = ARRAY_SIZE(s500_clks),
+
+ .hw_clks = &s500_hw_clks,
+};
+
+static int s500_clk_probe(struct platform_device *pdev)
+{
+ struct owl_clk_desc *desc;
+
+ desc = &s500_clk_desc;
+ owl_clk_regmap_init(pdev, desc);
+
+ return owl_clk_probe(&pdev->dev, desc->hw_clks);
+}
+
+static const struct of_device_id s500_clk_of_match[] = {
+ { .compatible = "actions,s500-cmu", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver s500_clk_driver = {
+ .probe = s500_clk_probe,
+ .driver = {
+ .name = "s500-cmu",
+ .of_match_table = s500_clk_of_match,
+ },
+};
+
+static int __init s500_clk_init(void)
+{
+ return platform_driver_register(&s500_clk_driver);
+}
+core_initcall(s500_clk_init);
diff --git a/drivers/clk/at91/clk-audio-pll.c b/drivers/clk/at91/clk-audio-pll.c
index 36d77146a3bd..3cc4a82f4e9f 100644
--- a/drivers/clk/at91/clk-audio-pll.c
+++ b/drivers/clk/at91/clk-audio-pll.c
@@ -340,7 +340,12 @@ static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate,
pr_debug("A PLL/PMC: %s, rate = %lu (parent_rate = %lu)\n", __func__,
rate, *parent_rate);
- for (div = 1; div <= AUDIO_PLL_QDPMC_MAX; div++) {
+ if (!rate)
+ return 0;
+
+ best_parent_rate = clk_round_rate(pclk->clk, 1);
+ div = max(best_parent_rate / rate, 1UL);
+ for (; div <= AUDIO_PLL_QDPMC_MAX; div++) {
best_parent_rate = clk_round_rate(pclk->clk, rate * div);
tmp_rate = best_parent_rate / div;
tmp_diff = abs(rate - tmp_rate);
@@ -350,6 +355,8 @@ static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate,
best_rate = tmp_rate;
best_diff = tmp_diff;
tmp_qd = div;
+ if (!best_diff)
+ break; /* got exact match */
}
}
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index 5bc68b9c5498..89d6f3736dbf 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -132,11 +132,8 @@ static int clk_programmable_set_rate(struct clk_hw *hw, unsigned long rate,
struct clk_programmable *prog = to_clk_programmable(hw);
const struct clk_programmable_layout *layout = prog->layout;
unsigned long div = parent_rate / rate;
- unsigned int pckr;
int shift = 0;
- regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr);
-
if (!div)
return -EINVAL;
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
index cd0ef7274fdb..1f70cb164b06 100644
--- a/drivers/clk/at91/sama5d2.c
+++ b/drivers/clk/at91/sama5d2.c
@@ -241,13 +241,14 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
parent_names[4] = "masterck";
+ parent_names[5] = "audiopll_pmcck";
for (i = 0; i < 3; i++) {
char name[6];
snprintf(name, sizeof(name), "prog%d", i);
hw = at91_clk_register_programmable(regmap, name,
- parent_names, 5, i,
+ parent_names, 6, i,
&at91sam9x5_programmable_layout);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c
index 2c04396402ab..c36c47bdba02 100644
--- a/drivers/clk/clk-clps711x.c
+++ b/drivers/clk/clk-clps711x.c
@@ -44,21 +44,21 @@ struct clps711x_clk {
struct clk_hw_onecell_data clk_data;
};
-static struct clps711x_clk * __init _clps711x_clk_init(void __iomem *base,
- u32 fref)
+static void __init clps711x_clk_init_dt(struct device_node *np)
{
- u32 tmp, f_cpu, f_pll, f_bus, f_tim, f_pwm, f_spi;
+ u32 tmp, f_cpu, f_pll, f_bus, f_tim, f_pwm, f_spi, fref = 0;
struct clps711x_clk *clps711x_clk;
- unsigned i;
+ void __iomem *base;
+
+ WARN_ON(of_property_read_u32(np, "startup-frequency", &fref));
- if (!base)
- return ERR_PTR(-ENOMEM);
+ base = of_iomap(np, 0);
+ BUG_ON(!base);
clps711x_clk = kzalloc(struct_size(clps711x_clk, clk_data.hws,
CLPS711X_CLK_MAX),
GFP_KERNEL);
- if (!clps711x_clk)
- return ERR_PTR(-ENOMEM);
+ BUG_ON(!clps711x_clk);
spin_lock_init(&clps711x_clk->lock);
@@ -137,52 +137,13 @@ static struct clps711x_clk * __init _clps711x_clk_init(void __iomem *base,
clk_hw_register_fixed_factor(NULL, "uart", "bus", 0, 1, 10);
clps711x_clk->clk_data.hws[CLPS711X_CLK_TICK] =
clk_hw_register_fixed_rate(NULL, "tick", NULL, 0, 64);
- for (i = 0; i < CLPS711X_CLK_MAX; i++)
- if (IS_ERR(clps711x_clk->clk_data.hws[i]))
+ for (tmp = 0; tmp < CLPS711X_CLK_MAX; tmp++)
+ if (IS_ERR(clps711x_clk->clk_data.hws[tmp]))
pr_err("clk %i: register failed with %ld\n",
- i, PTR_ERR(clps711x_clk->clk_data.hws[i]));
-
- return clps711x_clk;
-}
-
-void __init clps711x_clk_init(void __iomem *base)
-{
- struct clps711x_clk *clps711x_clk;
-
- clps711x_clk = _clps711x_clk_init(base, 73728000);
-
- BUG_ON(IS_ERR(clps711x_clk));
-
- /* Clocksource */
- clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_TIMER1],
- NULL, "clps711x-timer.0");
- clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_TIMER2],
- NULL, "clps711x-timer.1");
-
- /* Drivers */
- clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_PWM],
- NULL, "clps711x-pwm");
- clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_UART],
- NULL, "clps711x-uart.0");
- clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_UART],
- NULL, "clps711x-uart.1");
-}
-
-#ifdef CONFIG_OF
-static void __init clps711x_clk_init_dt(struct device_node *np)
-{
- void __iomem *base = of_iomap(np, 0);
- struct clps711x_clk *clps711x_clk;
- u32 fref = 0;
-
- WARN_ON(of_property_read_u32(np, "startup-frequency", &fref));
-
- clps711x_clk = _clps711x_clk_init(base, fref);
- BUG_ON(IS_ERR(clps711x_clk));
+ tmp, PTR_ERR(clps711x_clk->clk_data.hws[tmp]));
clps711x_clk->clk_data.num = CLPS711X_CLK_MAX;
of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
&clps711x_clk->clk_data);
}
CLK_OF_DECLARE(clps711x, "cirrus,ep7209-clk", clps711x_clk_init_dt);
-#endif
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
index c9a86156ced8..daa1fc8fba53 100644
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -29,6 +29,17 @@ struct clk *devm_clk_get(struct device *dev, const char *id)
}
EXPORT_SYMBOL(devm_clk_get);
+struct clk *devm_clk_get_optional(struct device *dev, const char *id)
+{
+ struct clk *clk = devm_clk_get(dev, id);
+
+ if (clk == ERR_PTR(-ENOENT))
+ return NULL;
+
+ return clk;
+}
+EXPORT_SYMBOL(devm_clk_get_optional);
+
struct clk_bulk_devres {
struct clk_bulk_data *clks;
int num_clks;
diff --git a/drivers/clk/clk-fixed-mmio.c b/drivers/clk/clk-fixed-mmio.c
new file mode 100644
index 000000000000..d1a97d971183
--- /dev/null
+++ b/drivers/clk/clk-fixed-mmio.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Memory Mapped IO Fixed clock driver
+ *
+ * Copyright (C) 2018 Cadence Design Systems, Inc.
+ *
+ * Authors:
+ * Jan Kotas <jank@cadence.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+static struct clk_hw *fixed_mmio_clk_setup(struct device_node *node)
+{
+ struct clk_hw *clk;
+ const char *clk_name = node->name;
+ void __iomem *base;
+ u32 freq;
+ int ret;
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("%pOFn: failed to map address\n", node);
+ return ERR_PTR(-EIO);
+ }
+
+ freq = readl(base);
+ iounmap(base);
+ of_property_read_string(node, "clock-output-names", &clk_name);
+
+ clk = clk_hw_register_fixed_rate(NULL, clk_name, NULL, 0, freq);
+ if (IS_ERR(clk)) {
+ pr_err("%pOFn: failed to register fixed rate clock\n", node);
+ return clk;
+ }
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, clk);
+ if (ret) {
+ pr_err("%pOFn: failed to add clock provider\n", node);
+ clk_hw_unregister(clk);
+ clk = ERR_PTR(ret);
+ }
+
+ return clk;
+}
+
+static void __init of_fixed_mmio_clk_setup(struct device_node *node)
+{
+ fixed_mmio_clk_setup(node);
+}
+CLK_OF_DECLARE(fixed_mmio_clk, "fixed-mmio-clock", of_fixed_mmio_clk_setup);
+
+/*
+ * This is not executed when of_fixed_mmio_clk_setup() succeeds.
+ */
+static int of_fixed_mmio_clk_probe(struct platform_device *pdev)
+{
+ struct clk_hw *clk;
+
+ clk = fixed_mmio_clk_setup(pdev->dev.of_node);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ platform_set_drvdata(pdev, clk);
+
+ return 0;
+}
+
+static int of_fixed_mmio_clk_remove(struct platform_device *pdev)
+{
+ struct clk_hw *clk = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(pdev->dev.of_node);
+ clk_hw_unregister_fixed_rate(clk);
+
+ return 0;
+}
+
+static const struct of_device_id of_fixed_mmio_clk_ids[] = {
+ { .compatible = "fixed-mmio-clock" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_fixed_mmio_clk_ids);
+
+static struct platform_driver of_fixed_mmio_clk_driver = {
+ .driver = {
+ .name = "of_fixed_mmio_clk",
+ .of_match_table = of_fixed_mmio_clk_ids,
+ },
+ .probe = of_fixed_mmio_clk_probe,
+ .remove = of_fixed_mmio_clk_remove,
+};
+module_platform_driver(of_fixed_mmio_clk_driver);
+
+MODULE_AUTHOR("Jan Kotas <jank@cadence.com>");
+MODULE_DESCRIPTION("Memory Mapped IO Fixed clock driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index 545dceec0bbf..fdfe2e423d15 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -79,7 +79,7 @@ static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long m, n;
u64 ret;
- if (!rate || rate >= *parent_rate)
+ if (!rate || (!clk_hw_can_set_rate_parent(hw) && rate >= *parent_rate))
return *parent_rate;
if (fd->approximation)
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 25eed3e0251f..c2f07f0d077c 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -58,6 +58,35 @@ const struct clk_ops clk_gpio_gate_ops = {
};
EXPORT_SYMBOL_GPL(clk_gpio_gate_ops);
+static int clk_sleeping_gpio_gate_prepare(struct clk_hw *hw)
+{
+ struct clk_gpio *clk = to_clk_gpio(hw);
+
+ gpiod_set_value_cansleep(clk->gpiod, 1);
+
+ return 0;
+}
+
+static void clk_sleeping_gpio_gate_unprepare(struct clk_hw *hw)
+{
+ struct clk_gpio *clk = to_clk_gpio(hw);
+
+ gpiod_set_value_cansleep(clk->gpiod, 0);
+}
+
+static int clk_sleeping_gpio_gate_is_prepared(struct clk_hw *hw)
+{
+ struct clk_gpio *clk = to_clk_gpio(hw);
+
+ return gpiod_get_value_cansleep(clk->gpiod);
+}
+
+static const struct clk_ops clk_sleeping_gpio_gate_ops = {
+ .prepare = clk_sleeping_gpio_gate_prepare,
+ .unprepare = clk_sleeping_gpio_gate_unprepare,
+ .is_prepared = clk_sleeping_gpio_gate_is_prepared,
+};
+
/**
* DOC: basic clock multiplexer which can be controlled with a gpio output
* Traits of this clock:
@@ -144,10 +173,16 @@ struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name,
const char *parent_name, struct gpio_desc *gpiod,
unsigned long flags)
{
+ const struct clk_ops *ops;
+
+ if (gpiod_cansleep(gpiod))
+ ops = &clk_sleeping_gpio_gate_ops;
+ else
+ ops = &clk_gpio_gate_ops;
+
return clk_register_gpio(dev, name,
(parent_name ? &parent_name : NULL),
- (parent_name ? 1 : 0), gpiod, flags,
- &clk_gpio_gate_ops);
+ (parent_name ? 1 : 0), gpiod, flags, ops);
}
EXPORT_SYMBOL_GPL(clk_hw_register_gpio_gate);
diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c
index 727ed8e1bb72..8e4581004695 100644
--- a/drivers/clk/clk-highbank.c
+++ b/drivers/clk/clk-highbank.c
@@ -293,6 +293,7 @@ static __init struct clk *hb_clk_init(struct device_node *node, const struct clk
/* Map system registers */
srnp = of_find_compatible_node(NULL, NULL, "calxeda,hb-sregs");
hb_clk->reg = of_iomap(srnp, 0);
+ of_node_put(srnp);
BUG_ON(!hb_clk->reg);
hb_clk->reg += reg;
diff --git a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c
index 22c937644c93..3727d5472450 100644
--- a/drivers/clk/clk-max77686.c
+++ b/drivers/clk/clk-max77686.c
@@ -235,8 +235,9 @@ static int max77686_clk_probe(struct platform_device *pdev)
return ret;
}
- ret = clk_hw_register_clkdev(&max_clk_data->hw,
- max_clk_data->clk_idata.name, NULL);
+ ret = devm_clk_hw_register_clkdev(dev, &max_clk_data->hw,
+ max_clk_data->clk_idata.name,
+ NULL);
if (ret < 0) {
dev_err(dev, "Failed to clkdev register: %d\n", ret);
return ret;
@@ -244,8 +245,8 @@ static int max77686_clk_probe(struct platform_device *pdev)
}
if (parent->of_node) {
- ret = of_clk_add_hw_provider(parent->of_node, of_clk_max77686_get,
- drv_data);
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_max77686_get,
+ drv_data);
if (ret < 0) {
dev_err(dev, "Failed to register OF clock provider: %d\n",
@@ -261,27 +262,11 @@ static int max77686_clk_probe(struct platform_device *pdev)
1 << MAX77802_CLOCK_LOW_JITTER_SHIFT);
if (ret < 0) {
dev_err(dev, "Failed to config low-jitter: %d\n", ret);
- goto remove_of_clk_provider;
+ return ret;
}
}
return 0;
-
-remove_of_clk_provider:
- if (parent->of_node)
- of_clk_del_provider(parent->of_node);
-
- return ret;
-}
-
-static int max77686_clk_remove(struct platform_device *pdev)
-{
- struct device *parent = pdev->dev.parent;
-
- if (parent->of_node)
- of_clk_del_provider(parent->of_node);
-
- return 0;
}
static const struct platform_device_id max77686_clk_id[] = {
@@ -297,7 +282,6 @@ static struct platform_driver max77686_clk_driver = {
.name = "max77686-clk",
},
.probe = max77686_clk_probe,
- .remove = max77686_clk_remove,
.id_table = max77686_clk_id,
};
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 5baa9e051110..1212a9be7e80 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -1148,8 +1148,8 @@ static void __init create_one_pll(struct clockgen *cg, int idx)
pll->div[i].clk = clk;
ret = clk_register_clkdev(clk, pll->div[i].name, NULL);
if (ret != 0)
- pr_err("%s: %s: register to lookup table failed %ld\n",
- __func__, pll->div[i].name, PTR_ERR(clk));
+ pr_err("%s: %s: register to lookup table failed %d\n",
+ __func__, pll->div[i].name, ret);
}
}
@@ -1389,6 +1389,7 @@ static void __init clockgen_init(struct device_node *np)
pr_err("%s: Couldn't map %pOF regs\n", __func__,
guts);
}
+ of_node_put(guts);
}
}
diff --git a/drivers/clk/clk-stm32mp1.c b/drivers/clk/clk-stm32mp1.c
index 6a31f7f434ce..a0ae8dc16909 100644
--- a/drivers/clk/clk-stm32mp1.c
+++ b/drivers/clk/clk-stm32mp1.c
@@ -121,7 +121,7 @@ static const char * const cpu_src[] = {
};
static const char * const axi_src[] = {
- "ck_hsi", "ck_hse", "pll2_p", "pll3_p"
+ "ck_hsi", "ck_hse", "pll2_p"
};
static const char * const per_src[] = {
@@ -225,19 +225,19 @@ static const char * const usart6_src[] = {
};
static const char * const fdcan_src[] = {
- "ck_hse", "pll3_q", "pll4_q"
+ "ck_hse", "pll3_q", "pll4_q", "pll4_r"
};
static const char * const sai_src[] = {
- "pll4_q", "pll3_q", "i2s_ckin", "ck_per"
+ "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "pll3_r"
};
static const char * const sai2_src[] = {
- "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "spdif_ck_symb"
+ "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "spdif_ck_symb", "pll3_r"
};
static const char * const adc12_src[] = {
- "pll4_q", "ck_per"
+ "pll4_r", "ck_per", "pll3_q"
};
static const char * const dsi_src[] = {
@@ -269,7 +269,7 @@ static const struct clk_div_table axi_div_table[] = {
static const struct clk_div_table mcu_div_table[] = {
{ 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 },
{ 4, 16 }, { 5, 32 }, { 6, 64 }, { 7, 128 },
- { 8, 512 }, { 9, 512 }, { 10, 512}, { 11, 512 },
+ { 8, 256 }, { 9, 512 }, { 10, 512}, { 11, 512 },
{ 12, 512 }, { 13, 512 }, { 14, 512}, { 15, 512 },
{ 0 },
};
@@ -1286,10 +1286,11 @@ _clk_stm32_register_composite(struct device *dev,
MGATE_MP1(_id, _name, _parent, _flags, _mgate)
#define KCLK(_id, _name, _parents, _flags, _mgate, _mmux)\
- COMPOSITE(_id, _name, _parents, CLK_OPS_PARENT_ENABLE | _flags,\
- _MGATE_MP1(_mgate),\
- _MMUX(_mmux),\
- _NO_DIV)
+ COMPOSITE(_id, _name, _parents, CLK_OPS_PARENT_ENABLE |\
+ CLK_SET_RATE_NO_REPARENT | _flags,\
+ _MGATE_MP1(_mgate),\
+ _MMUX(_mmux),\
+ _NO_DIV)
enum {
G_SAI1,
@@ -1655,12 +1656,14 @@ static const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = {
static const struct clock_config stm32mp1_clock_cfg[] = {
/* Oscillator divider */
- DIV(NO_ID, "clk-hsi-div", "clk-hsi", 0, RCC_HSICFGR, 0, 2,
- CLK_DIVIDER_READ_ONLY),
+ DIV(NO_ID, "clk-hsi-div", "clk-hsi", CLK_DIVIDER_POWER_OF_TWO,
+ RCC_HSICFGR, 0, 2, CLK_DIVIDER_READ_ONLY),
/* External / Internal Oscillators */
GATE_MP1(CK_HSE, "ck_hse", "clk-hse", 0, RCC_OCENSETR, 8, 0),
- GATE_MP1(CK_CSI, "ck_csi", "clk-csi", 0, RCC_OCENSETR, 4, 0),
+ /* ck_csi is used by IO compensation and should be critical */
+ GATE_MP1(CK_CSI, "ck_csi", "clk-csi", CLK_IS_CRITICAL,
+ RCC_OCENSETR, 4, 0),
GATE_MP1(CK_HSI, "ck_hsi", "clk-hsi-div", 0, RCC_OCENSETR, 0, 0),
GATE(CK_LSI, "ck_lsi", "clk-lsi", 0, RCC_RDLSICR, 0, 0),
GATE(CK_LSE, "ck_lse", "clk-lse", 0, RCC_BDCR, 0, 0),
@@ -1952,14 +1955,14 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
MGATE_MP1(GPU_K, "gpu_k", "pll2_q", 0, G_GPU),
MGATE_MP1(DAC12_K, "dac12_k", "ck_lsi", 0, G_DAC12),
- COMPOSITE(ETHPTP_K, "ethptp_k", eth_src, CLK_OPS_PARENT_ENABLE,
+ COMPOSITE(ETHPTP_K, "ethptp_k", eth_src, CLK_OPS_PARENT_ENABLE |
+ CLK_SET_RATE_NO_REPARENT,
_NO_GATE,
_MMUX(M_ETHCK),
- _DIV(RCC_ETHCKSELR, 4, 4, CLK_DIVIDER_ALLOW_ZERO, NULL)),
+ _DIV(RCC_ETHCKSELR, 4, 4, 0, NULL)),
/* RTC clock */
- DIV(NO_ID, "ck_hse_rtc", "ck_hse", 0, RCC_RTCDIVR, 0, 7,
- CLK_DIVIDER_ALLOW_ZERO),
+ DIV(NO_ID, "ck_hse_rtc", "ck_hse", 0, RCC_RTCDIVR, 0, 6, 0),
COMPOSITE(RTC, "ck_rtc", rtc_src, CLK_OPS_PARENT_ENABLE |
CLK_SET_RATE_PARENT,
diff --git a/drivers/clk/clk-twl6040.c b/drivers/clk/clk-twl6040.c
index ea846f77750b..0cad5748bf0e 100644
--- a/drivers/clk/clk-twl6040.c
+++ b/drivers/clk/clk-twl6040.c
@@ -41,6 +41,43 @@ static int twl6040_pdmclk_is_prepared(struct clk_hw *hw)
return pdmclk->enabled;
}
+static int twl6040_pdmclk_reset_one_clock(struct twl6040_pdmclk *pdmclk,
+ unsigned int reg)
+{
+ const u8 reset_mask = TWL6040_HPLLRST; /* Same for HPPLL and LPPLL */
+ int ret;
+
+ ret = twl6040_set_bits(pdmclk->twl6040, reg, reset_mask);
+ if (ret < 0)
+ return ret;
+
+ ret = twl6040_clear_bits(pdmclk->twl6040, reg, reset_mask);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * TWL6040A2 Phoenix Audio IC erratum #6: "PDM Clock Generation Issue At
+ * Cold Temperature". This seems to affect cold boot and deeper idle
+ * states. The workaround consists of resetting the HPPLL and LPPLL.
+ */
+static int twl6040_pdmclk_quirk_reset_clocks(struct twl6040_pdmclk *pdmclk)
+{
+ int ret;
+
+ ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_HPPLLCTL);
+ if (ret)
+ return ret;
+
+ ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_LPPLLCTL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int twl6040_pdmclk_prepare(struct clk_hw *hw)
{
struct twl6040_pdmclk *pdmclk = container_of(hw, struct twl6040_pdmclk,
@@ -48,8 +85,20 @@ static int twl6040_pdmclk_prepare(struct clk_hw *hw)
int ret;
ret = twl6040_power(pdmclk->twl6040, 1);
- if (!ret)
- pdmclk->enabled = 1;
+ if (ret)
+ return ret;
+
+ ret = twl6040_pdmclk_quirk_reset_clocks(pdmclk);
+ if (ret)
+ goto out_err;
+
+ pdmclk->enabled = 1;
+
+ return 0;
+
+out_err:
+ dev_err(pdmclk->dev, "%s: error %i\n", __func__, ret);
+ twl6040_power(pdmclk->twl6040, 0);
return ret;
}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index d2477a5058ac..96053a96fe2f 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -57,6 +57,7 @@ struct clk_core {
struct clk_core *new_child;
unsigned long flags;
bool orphan;
+ bool rpm_enabled;
unsigned int enable_count;
unsigned int prepare_count;
unsigned int protect_count;
@@ -81,6 +82,7 @@ struct clk_core {
struct clk {
struct clk_core *core;
+ struct device *dev;
const char *dev_id;
const char *con_id;
unsigned long min_rate;
@@ -92,9 +94,9 @@ struct clk {
/*** runtime pm ***/
static int clk_pm_runtime_get(struct clk_core *core)
{
- int ret = 0;
+ int ret;
- if (!core->dev)
+ if (!core->rpm_enabled)
return 0;
ret = pm_runtime_get_sync(core->dev);
@@ -103,7 +105,7 @@ static int clk_pm_runtime_get(struct clk_core *core)
static void clk_pm_runtime_put(struct clk_core *core)
{
- if (!core->dev)
+ if (!core->rpm_enabled)
return;
pm_runtime_put_sync(core->dev);
@@ -223,7 +225,7 @@ static bool clk_core_is_enabled(struct clk_core *core)
* taking enable spinlock, but the below check is needed if one tries
* to call it from other places.
*/
- if (core->dev) {
+ if (core->rpm_enabled) {
pm_runtime_get_noresume(core->dev);
if (!pm_runtime_active(core->dev)) {
ret = false;
@@ -233,7 +235,7 @@ static bool clk_core_is_enabled(struct clk_core *core)
ret = core->ops->is_enabled(core->hw);
done:
- if (core->dev)
+ if (core->rpm_enabled)
pm_runtime_put(core->dev);
return ret;
@@ -394,16 +396,19 @@ bool clk_hw_is_prepared(const struct clk_hw *hw)
{
return clk_core_is_prepared(hw->core);
}
+EXPORT_SYMBOL_GPL(clk_hw_is_prepared);
bool clk_hw_rate_is_protected(const struct clk_hw *hw)
{
return clk_core_rate_is_protected(hw->core);
}
+EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);
bool clk_hw_is_enabled(const struct clk_hw *hw)
{
return clk_core_is_enabled(hw->core);
}
+EXPORT_SYMBOL_GPL(clk_hw_is_enabled);
bool __clk_is_enabled(struct clk *clk)
{
@@ -3209,43 +3214,106 @@ unlock:
return ret;
}
-struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
+/**
+ * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core
+ * @core: clk to add consumer to
+ * @clk: consumer to link to a clk
+ */
+static void clk_core_link_consumer(struct clk_core *core, struct clk *clk)
+{
+ clk_prepare_lock();
+ hlist_add_head(&clk->clks_node, &core->clks);
+ clk_prepare_unlock();
+}
+
+/**
+ * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core
+ * @clk: consumer to unlink
+ */
+static void clk_core_unlink_consumer(struct clk *clk)
+{
+ lockdep_assert_held(&prepare_lock);
+ hlist_del(&clk->clks_node);
+}
+
+/**
+ * alloc_clk - Allocate a clk consumer, but leave it unlinked from the clk_core
+ * @core: clk to allocate a consumer for
+ * @dev_id: string describing device name
+ * @con_id: connection ID string on device
+ *
+ * Returns: clk consumer left unlinked from the consumer list
+ */
+static struct clk *alloc_clk(struct clk_core *core, const char *dev_id,
const char *con_id)
{
struct clk *clk;
- /* This is to allow this function to be chained to others */
- if (IS_ERR_OR_NULL(hw))
- return ERR_CAST(hw);
-
clk = kzalloc(sizeof(*clk), GFP_KERNEL);
if (!clk)
return ERR_PTR(-ENOMEM);
- clk->core = hw->core;
+ clk->core = core;
clk->dev_id = dev_id;
clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
clk->max_rate = ULONG_MAX;
- clk_prepare_lock();
- hlist_add_head(&clk->clks_node, &hw->core->clks);
- clk_prepare_unlock();
-
return clk;
}
-/* keep in sync with __clk_put */
-void __clk_free_clk(struct clk *clk)
+/**
+ * free_clk - Free a clk consumer
+ * @clk: clk consumer to free
+ *
+ * Note, this assumes the clk has been unlinked from the clk_core consumer
+ * list.
+ */
+static void free_clk(struct clk *clk)
{
- clk_prepare_lock();
- hlist_del(&clk->clks_node);
- clk_prepare_unlock();
-
kfree_const(clk->con_id);
kfree(clk);
}
/**
+ * clk_hw_create_clk - Allocate and link a clk consumer to a clk_core given
+ * a clk_hw
+ * @dev: clk consumer device
+ * @hw: clk_hw associated with the clk being consumed
+ * @dev_id: string describing device name
+ * @con_id: connection ID string on device
+ *
+ * This is the main function used to create a clk pointer for use by clk
+ * consumers. It connects a consumer to the clk_core and clk_hw structures
+ * used by the framework and clk provider respectively.
+ */
+struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
+ const char *dev_id, const char *con_id)
+{
+ struct clk *clk;
+ struct clk_core *core;
+
+ /* This is to allow this function to be chained to others */
+ if (IS_ERR_OR_NULL(hw))
+ return ERR_CAST(hw);
+
+ core = hw->core;
+ clk = alloc_clk(core, dev_id, con_id);
+ if (IS_ERR(clk))
+ return clk;
+ clk->dev = dev;
+
+ if (!try_module_get(core->owner)) {
+ free_clk(clk);
+ return ERR_PTR(-ENOENT);
+ }
+
+ kref_get(&core->ref);
+ clk_core_link_consumer(core, clk);
+
+ return clk;
+}
+
+/**
* clk_register - allocate a new clock, register it and return an opaque cookie
* @dev: device that is registering this clock
* @hw: link to hardware-specific clock data
@@ -3280,7 +3348,8 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
core->ops = hw->init->ops;
if (dev && pm_runtime_enabled(dev))
- core->dev = dev;
+ core->rpm_enabled = true;
+ core->dev = dev;
if (dev && dev->driver)
core->owner = dev->driver->owner;
core->hw = hw;
@@ -3320,17 +3389,27 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
INIT_HLIST_HEAD(&core->clks);
- hw->clk = __clk_create_clk(hw, NULL, NULL);
+ /*
+ * Don't call clk_hw_create_clk() here because that would pin the
+ * provider module to itself and prevent it from ever being removed.
+ */
+ hw->clk = alloc_clk(core, NULL, NULL);
if (IS_ERR(hw->clk)) {
ret = PTR_ERR(hw->clk);
goto fail_parents;
}
+ clk_core_link_consumer(hw->core, hw->clk);
+
ret = __clk_core_init(core);
if (!ret)
return hw->clk;
- __clk_free_clk(hw->clk);
+ clk_prepare_lock();
+ clk_core_unlink_consumer(hw->clk);
+ clk_prepare_unlock();
+
+ free_clk(hw->clk);
hw->clk = NULL;
fail_parents:
@@ -3601,20 +3680,7 @@ EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);
/*
* clkdev helpers
*/
-int __clk_get(struct clk *clk)
-{
- struct clk_core *core = !clk ? NULL : clk->core;
-
- if (core) {
- if (!try_module_get(core->owner))
- return 0;
- kref_get(&core->ref);
- }
- return 1;
-}
-
-/* keep in sync with __clk_free_clk */
void __clk_put(struct clk *clk)
{
struct module *owner;
@@ -3648,8 +3714,7 @@ void __clk_put(struct clk *clk)
module_put(owner);
- kfree_const(clk->con_id);
- kfree(clk);
+ free_clk(clk);
}
/*** clk rate change notifiers ***/
@@ -4006,6 +4071,49 @@ void devm_of_clk_del_provider(struct device *dev)
}
EXPORT_SYMBOL(devm_of_clk_del_provider);
+/*
+ * Beware the return values when np is valid, but no clock provider is found.
+ * If name == NULL, the function returns -ENOENT.
+ * If name != NULL, the function returns -EINVAL. This is because
+ * of_parse_phandle_with_args() is called even if of_property_match_string()
+ * returns an error.
+ */
+static int of_parse_clkspec(const struct device_node *np, int index,
+ const char *name, struct of_phandle_args *out_args)
+{
+ int ret = -ENOENT;
+
+ /* Walk up the tree of devices looking for a clock property that matches */
+ while (np) {
+ /*
+ * For named clocks, first look up the name in the
+ * "clock-names" property. If it cannot be found, then index
+ * will be an error code and of_parse_phandle_with_args() will
+ * return -EINVAL.
+ */
+ if (name)
+ index = of_property_match_string(np, "clock-names", name);
+ ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
+ index, out_args);
+ if (!ret)
+ break;
+ if (name && index >= 0)
+ break;
+
+ /*
+ * No matching clock found on this node. If the parent node
+ * has a "clock-ranges" property, then we can try one of its
+ * clocks.
+ */
+ np = np->parent;
+ if (np && !of_get_property(np, "clock-ranges", NULL))
+ break;
+ index = 0;
+ }
+
+ return ret;
+}
+
static struct clk_hw *
__of_clk_get_hw_from_provider(struct of_clk_provider *provider,
struct of_phandle_args *clkspec)
@@ -4021,36 +4129,26 @@ __of_clk_get_hw_from_provider(struct of_clk_provider *provider,
return __clk_get_hw(clk);
}
-struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
- const char *dev_id, const char *con_id)
+static struct clk_hw *
+of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
{
struct of_clk_provider *provider;
- struct clk *clk = ERR_PTR(-EPROBE_DEFER);
- struct clk_hw *hw;
+ struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
if (!clkspec)
return ERR_PTR(-EINVAL);
- /* Check if we have such a provider in our array */
mutex_lock(&of_clk_mutex);
list_for_each_entry(provider, &of_clk_providers, link) {
if (provider->node == clkspec->np) {
hw = __of_clk_get_hw_from_provider(provider, clkspec);
- clk = __clk_create_clk(hw, dev_id, con_id);
- }
-
- if (!IS_ERR(clk)) {
- if (!__clk_get(clk)) {
- __clk_free_clk(clk);
- clk = ERR_PTR(-ENOENT);
- }
-
- break;
+ if (!IS_ERR(hw))
+ break;
}
}
mutex_unlock(&of_clk_mutex);
- return clk;
+ return hw;
}
/**
@@ -4063,10 +4161,62 @@ struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
*/
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
- return __of_clk_get_from_provider(clkspec, NULL, __func__);
+ struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec);
+
+ return clk_hw_create_clk(NULL, hw, NULL, __func__);
}
EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
+struct clk_hw *of_clk_get_hw(struct device_node *np, int index,
+ const char *con_id)
+{
+ int ret;
+ struct clk_hw *hw;
+ struct of_phandle_args clkspec;
+
+ ret = of_parse_clkspec(np, index, con_id, &clkspec);
+ if (ret)
+ return ERR_PTR(ret);
+
+ hw = of_clk_get_hw_from_clkspec(&clkspec);
+ of_node_put(clkspec.np);
+
+ return hw;
+}
+
+static struct clk *__of_clk_get(struct device_node *np,
+ int index, const char *dev_id,
+ const char *con_id)
+{
+ struct clk_hw *hw = of_clk_get_hw(np, index, con_id);
+
+ return clk_hw_create_clk(NULL, hw, dev_id, con_id);
+}
+
+struct clk *of_clk_get(struct device_node *np, int index)
+{
+ return __of_clk_get(np, index, np->full_name, NULL);
+}
+EXPORT_SYMBOL(of_clk_get);
+
+/**
+ * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
+ * @np: pointer to clock consumer node
+ * @name: name of consumer's clock input, or NULL for the first clock reference
+ *
+ * This function parses the clocks and clock-names properties,
+ * and uses them to look up the struct clk from the registered list of clock
+ * providers.
+ */
+struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
+{
+ if (!np)
+ return ERR_PTR(-ENOENT);
+
+ return __of_clk_get(np, 0, np->full_name, name);
+}
+EXPORT_SYMBOL(of_clk_get_by_name);
+
/**
* of_clk_get_parent_count() - Count the number of clocks a device node has
* @np: device node to count
diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h
index b02f5e604e69..553f531cc232 100644
--- a/drivers/clk/clk.h
+++ b/drivers/clk/clk.h
@@ -5,31 +5,36 @@
*/
struct clk_hw;
+struct device;
+struct of_phandle_args;
#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
-struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
- const char *dev_id, const char *con_id);
+struct clk_hw *of_clk_get_hw(struct device_node *np,
+ int index, const char *con_id);
+#else /* !CONFIG_COMMON_CLK || !CONFIG_OF */
+static inline struct clk_hw *of_clk_get_hw(struct device_node *np,
+ int index, const char *con_id)
+{
+ return ERR_PTR(-ENOENT);
+}
#endif
#ifdef CONFIG_COMMON_CLK
-struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
- const char *con_id);
-void __clk_free_clk(struct clk *clk);
-int __clk_get(struct clk *clk);
+struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
+ const char *dev_id, const char *con_id);
void __clk_put(struct clk *clk);
#else
/* All these casts to avoid ifdefs in clkdev... */
static inline struct clk *
-__clk_create_clk(struct clk_hw *hw, const char *dev_id, const char *con_id)
+clk_hw_create_clk(struct device *dev, struct clk_hw *hw, const char *dev_id,
+ const char *con_id)
{
return (struct clk *)hw;
}
-static inline void __clk_free_clk(struct clk *clk) { }
static struct clk_hw *__clk_get_hw(struct clk *clk)
{
return (struct clk_hw *)clk;
}
-static inline int __clk_get(struct clk *clk) { return 1; }
static inline void __clk_put(struct clk *clk) { }
#endif
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 9ab3db8b3988..8c4435c53f09 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -27,99 +27,6 @@
static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
-#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
-static struct clk *__of_clk_get(struct device_node *np, int index,
- const char *dev_id, const char *con_id)
-{
- struct of_phandle_args clkspec;
- struct clk *clk;
- int rc;
-
- rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
- &clkspec);
- if (rc)
- return ERR_PTR(rc);
-
- clk = __of_clk_get_from_provider(&clkspec, dev_id, con_id);
- of_node_put(clkspec.np);
-
- return clk;
-}
-
-struct clk *of_clk_get(struct device_node *np, int index)
-{
- return __of_clk_get(np, index, np->full_name, NULL);
-}
-EXPORT_SYMBOL(of_clk_get);
-
-static struct clk *__of_clk_get_by_name(struct device_node *np,
- const char *dev_id,
- const char *name)
-{
- struct clk *clk = ERR_PTR(-ENOENT);
-
- /* Walk up the tree of devices looking for a clock that matches */
- while (np) {
- int index = 0;
-
- /*
- * For named clocks, first look up the name in the
- * "clock-names" property. If it cannot be found, then
- * index will be an error code, and of_clk_get() will fail.
- */
- if (name)
- index = of_property_match_string(np, "clock-names", name);
- clk = __of_clk_get(np, index, dev_id, name);
- if (!IS_ERR(clk)) {
- break;
- } else if (name && index >= 0) {
- if (PTR_ERR(clk) != -EPROBE_DEFER)
- pr_err("ERROR: could not get clock %pOF:%s(%i)\n",
- np, name ? name : "", index);
- return clk;
- }
-
- /*
- * No matching clock found on this node. If the parent node
- * has a "clock-ranges" property, then we can try one of its
- * clocks.
- */
- np = np->parent;
- if (np && !of_get_property(np, "clock-ranges", NULL))
- break;
- }
-
- return clk;
-}
-
-/**
- * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
- * @np: pointer to clock consumer node
- * @name: name of consumer's clock input, or NULL for the first clock reference
- *
- * This function parses the clocks and clock-names properties,
- * and uses them to look up the struct clk from the registered list of clock
- * providers.
- */
-struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
-{
- if (!np)
- return ERR_PTR(-ENOENT);
-
- return __of_clk_get_by_name(np, np->full_name, name);
-}
-EXPORT_SYMBOL(of_clk_get_by_name);
-
-#else /* defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) */
-
-static struct clk *__of_clk_get_by_name(struct device_node *np,
- const char *dev_id,
- const char *name)
-{
- return ERR_PTR(-ENOENT);
-}
-#endif
-
/*
* Find the correct struct clk for the device and connection ID.
* We do slightly fuzzy matching here:
@@ -163,7 +70,8 @@ static struct clk_lookup *clk_find(const char *dev_id, const char *con_id)
return cl;
}
-struct clk *clk_get_sys(const char *dev_id, const char *con_id)
+static struct clk *__clk_get_sys(struct device *dev, const char *dev_id,
+ const char *con_id)
{
struct clk_lookup *cl;
struct clk *clk = NULL;
@@ -174,35 +82,33 @@ struct clk *clk_get_sys(const char *dev_id, const char *con_id)
if (!cl)
goto out;
- clk = __clk_create_clk(cl->clk_hw, dev_id, con_id);
+ clk = clk_hw_create_clk(dev, cl->clk_hw, dev_id, con_id);
if (IS_ERR(clk))
- goto out;
-
- if (!__clk_get(clk)) {
- __clk_free_clk(clk);
cl = NULL;
- goto out;
- }
-
out:
mutex_unlock(&clocks_mutex);
return cl ? clk : ERR_PTR(-ENOENT);
}
+
+struct clk *clk_get_sys(const char *dev_id, const char *con_id)
+{
+ return __clk_get_sys(NULL, dev_id, con_id);
+}
EXPORT_SYMBOL(clk_get_sys);
struct clk *clk_get(struct device *dev, const char *con_id)
{
const char *dev_id = dev ? dev_name(dev) : NULL;
- struct clk *clk;
+ struct clk_hw *hw;
if (dev && dev->of_node) {
- clk = __of_clk_get_by_name(dev->of_node, dev_id, con_id);
- if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
- return clk;
+ hw = of_clk_get_hw(dev->of_node, 0, con_id);
+ if (!IS_ERR(hw) || PTR_ERR(hw) == -EPROBE_DEFER)
+ return clk_hw_create_clk(dev, hw, dev_id, con_id);
}
- return clk_get_sys(dev_id, con_id);
+ return __clk_get_sys(dev, dev_id, con_id);
}
EXPORT_SYMBOL(clk_get);
@@ -401,6 +307,23 @@ static struct clk_lookup *__clk_register_clkdev(struct clk_hw *hw,
return cl;
}
+static int do_clk_register_clkdev(struct clk_hw *hw,
+ struct clk_lookup **cl, const char *con_id, const char *dev_id)
+{
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ /*
+ * Since dev_id can be NULL, and NULL is handled specially, we must
+ * pass it as either a NULL format string, or with "%s".
+ */
+ if (dev_id)
+ *cl = __clk_register_clkdev(hw, con_id, "%s", dev_id);
+ else
+ *cl = __clk_register_clkdev(hw, con_id, NULL);
+
+ return *cl ? 0 : -ENOMEM;
+}
+
/**
* clk_register_clkdev - register one clock lookup for a struct clk
* @clk: struct clk to associate with all clk_lookups
@@ -423,17 +346,8 @@ int clk_register_clkdev(struct clk *clk, const char *con_id,
if (IS_ERR(clk))
return PTR_ERR(clk);
- /*
- * Since dev_id can be NULL, and NULL is handled specially, we must
- * pass it as either a NULL format string, or with "%s".
- */
- if (dev_id)
- cl = __clk_register_clkdev(__clk_get_hw(clk), con_id, "%s",
- dev_id);
- else
- cl = __clk_register_clkdev(__clk_get_hw(clk), con_id, NULL);
-
- return cl ? 0 : -ENOMEM;
+ return do_clk_register_clkdev(__clk_get_hw(clk), &cl, con_id,
+ dev_id);
}
EXPORT_SYMBOL(clk_register_clkdev);
@@ -456,18 +370,75 @@ int clk_hw_register_clkdev(struct clk_hw *hw, const char *con_id,
{
struct clk_lookup *cl;
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ return do_clk_register_clkdev(hw, &cl, con_id, dev_id);
+}
+EXPORT_SYMBOL(clk_hw_register_clkdev);
- /*
- * Since dev_id can be NULL, and NULL is handled specially, we must
- * pass it as either a NULL format string, or with "%s".
- */
- if (dev_id)
- cl = __clk_register_clkdev(hw, con_id, "%s", dev_id);
- else
- cl = __clk_register_clkdev(hw, con_id, NULL);
+static void devm_clkdev_release(struct device *dev, void *res)
+{
+ clkdev_drop(*(struct clk_lookup **)res);
+}
+
+static int devm_clk_match_clkdev(struct device *dev, void *res, void *data)
+{
+ struct clk_lookup **l = res;
- return cl ? 0 : -ENOMEM;
+ return *l == data;
}
-EXPORT_SYMBOL(clk_hw_register_clkdev);
+
+/**
+ * devm_clk_release_clkdev - Resource managed clkdev lookup release
+ * @dev: device this lookup is bound to
+ * @con_id: connection ID string on device
+ * @dev_id: format string describing device name
+ *
+ * Drop the clkdev lookup created with devm_clk_hw_register_clkdev.
+ * Normally this function will not need to be called and the resource
+ * management code will ensure that the resource is freed.
+ */
+void devm_clk_release_clkdev(struct device *dev, const char *con_id,
+ const char *dev_id)
+{
+ struct clk_lookup *cl;
+ int rval;
+
+ cl = clk_find(dev_id, con_id);
+ WARN_ON(!cl);
+ rval = devres_release(dev, devm_clkdev_release,
+ devm_clk_match_clkdev, cl);
+ WARN_ON(rval);
+}
+EXPORT_SYMBOL(devm_clk_release_clkdev);
+
+/**
+ * devm_clk_hw_register_clkdev - managed clk lookup registration for clk_hw
+ * @dev: device this lookup is bound to
+ * @hw: struct clk_hw to associate with all clk_lookups
+ * @con_id: connection ID string on device
+ * @dev_id: format string describing device name
+ *
+ * con_id or dev_id may be NULL as a wildcard, just as in the rest of
+ * clkdev.
+ *
+ * To make things easier for mass registration, we detect error clk_hws
+ * from a previous clk_hw_register_*() call, and return the error code for
+ * those. This is to permit this function to be called immediately
+ * after clk_hw_register_*().
+ */
+int devm_clk_hw_register_clkdev(struct device *dev, struct clk_hw *hw,
+ const char *con_id, const char *dev_id)
+{
+ int rval = -ENOMEM;
+ struct clk_lookup **cl;
+
+ cl = devres_alloc(devm_clkdev_release, sizeof(*cl), GFP_KERNEL);
+ if (cl) {
+ rval = do_clk_register_clkdev(hw, cl, con_id, dev_id);
+ if (!rval)
+ devres_add(dev, cl);
+ else
+ devres_free(cl);
+ }
+ return rval;
+}
+EXPORT_SYMBOL(devm_clk_hw_register_clkdev);
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index 4aae31a23449..0eaf41848280 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -8,6 +8,12 @@ config MXC_CLK_SCU
bool
depends on IMX_SCU
+config CLK_IMX8MM
+ bool "IMX8MM CCM Clock Driver"
+ depends on ARCH_MXC && ARM64
+ help
+ Build the driver for the i.MX8MM CCM clock driver
+
config CLK_IMX8MQ
bool "IMX8MQ CCM Clock Driver"
depends on ARCH_MXC && ARM64
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile
index 73119fbfa547..0d5180fbe988 100644
--- a/drivers/clk/imx/Makefile
+++ b/drivers/clk/imx/Makefile
@@ -18,12 +18,14 @@ obj-$(CONFIG_MXC_CLK) += \
clk-pllv2.o \
clk-pllv3.o \
clk-pllv4.o \
- clk-sccg-pll.o
+ clk-sccg-pll.o \
+ clk-pll14xx.o
obj-$(CONFIG_MXC_CLK_SCU) += \
clk-scu.o \
clk-lpcg-scu.o
+obj-$(CONFIG_CLK_IMX8MM) += clk-imx8mm.o
obj-$(CONFIG_CLK_IMX8MQ) += clk-imx8mq.o
obj-$(CONFIG_CLK_IMX8QXP) += clk-imx8qxp.o clk-imx8qxp-lpcg.o
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index 527ade1d6933..574fac1a169f 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -123,7 +123,7 @@ static const struct clk_ops imx8m_clk_composite_divider_ops = {
};
struct clk *imx8m_clk_composite_flags(const char *name,
- const char **parent_names,
+ const char * const *parent_names,
int num_parents, void __iomem *reg,
unsigned long flags)
{
diff --git a/drivers/clk/imx/clk-imx51-imx53.c b/drivers/clk/imx/clk-imx51-imx53.c
index fc8e782d817b..e91c826bce70 100644
--- a/drivers/clk/imx/clk-imx51-imx53.c
+++ b/drivers/clk/imx/clk-imx51-imx53.c
@@ -428,6 +428,7 @@ static void __init mx51_clocks_init(struct device_node *np)
clk[IMX5_CLK_ESDHC4_PER_GATE] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
clk[IMX5_CLK_USB_PHY_GATE] = imx_clk_gate2("usb_phy_gate", "usb_phy_sel", MXC_CCM_CCGR2, 0);
clk[IMX5_CLK_HSI2C_GATE] = imx_clk_gate2("hsi2c_gate", "ipg", MXC_CCM_CCGR1, 22);
+ clk[IMX5_CLK_SCC2_IPG_GATE] = imx_clk_gate2("scc2_gate", "ipg", MXC_CCM_CCGR1, 30);
clk[IMX5_CLK_MIPI_HSC1_GATE] = imx_clk_gate2_flags("mipi_hsc1_gate", "ipg", MXC_CCM_CCGR4, 6, CLK_IS_CRITICAL);
clk[IMX5_CLK_MIPI_HSC2_GATE] = imx_clk_gate2_flags("mipi_hsc2_gate", "ipg", MXC_CCM_CCGR4, 8, CLK_IS_CRITICAL);
clk[IMX5_CLK_MIPI_ESC_GATE] = imx_clk_gate2_flags("mipi_esc_gate", "ipg", MXC_CCM_CCGR4, 10, CLK_IS_CRITICAL);
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index 716eac3136b4..708e7c5590dd 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -471,6 +471,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop");
anatop_base = base = of_iomap(np, 0);
WARN_ON(!base);
+ of_node_put(np);
/* Audio/video PLL post dividers do not work on i.MX6q revision 1.0 */
if (clk_on_imx6q() && imx_get_soc_revision() == IMX_CHIP_REVISION_1_0) {
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index 18527a335ace..91558b09bf9e 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -151,6 +151,7 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
np = of_find_compatible_node(NULL, NULL, "fsl,imx6sx-anatop");
base = of_iomap(np, 0);
WARN_ON(!base);
+ of_node_put(np);
clks[IMX6SX_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
clks[IMX6SX_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 06c105d580a4..cfbd8d4edb85 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -404,6 +404,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
np = of_find_compatible_node(NULL, NULL, "fsl,imx7d-anatop");
base = of_iomap(np, 0);
WARN_ON(!base);
+ of_node_put(np);
clks[IMX7D_PLL_ARM_MAIN_SRC] = imx_clk_mux("pll_arm_main_src", base + 0x60, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel));
clks[IMX7D_PLL_DRAM_MAIN_SRC] = imx_clk_mux("pll_dram_main_src", base + 0x70, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel));
diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
index 4e18f629f823..ce306631e844 100644
--- a/drivers/clk/imx/clk-imx7ulp.c
+++ b/drivers/clk/imx/clk-imx7ulp.c
@@ -48,8 +48,8 @@ static void __init imx7ulp_clk_scg1_init(struct device_node *np)
struct clk_hw **clks;
void __iomem *base;
- clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) *
- IMX7ULP_CLK_SCG1_END, GFP_KERNEL);
+ clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_SCG1_END),
+ GFP_KERNEL);
if (!clk_data)
return;
@@ -136,8 +136,8 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
struct clk_hw **clks;
void __iomem *base;
- clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) *
- IMX7ULP_CLK_PCC2_END, GFP_KERNEL);
+ clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC2_END),
+ GFP_KERNEL);
if (!clk_data)
return;
@@ -183,8 +183,8 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
struct clk_hw **clks;
void __iomem *base;
- clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) *
- IMX7ULP_CLK_PCC3_END, GFP_KERNEL);
+ clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC3_END),
+ GFP_KERNEL);
if (!clk_data)
return;
@@ -228,8 +228,8 @@ static void __init imx7ulp_clk_smc1_init(struct device_node *np)
struct clk_hw **clks;
void __iomem *base;
- clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) *
- IMX7ULP_CLK_SMC1_END, GFP_KERNEL);
+ clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_SMC1_END),
+ GFP_KERNEL);
if (!clk_data)
return;
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
new file mode 100644
index 000000000000..1ef8438e3d6d
--- /dev/null
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -0,0 +1,675 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2017-2018 NXP.
+ */
+
+#include <dt-bindings/clock/imx8mm-clock.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include "clk.h"
+
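+/*
+ * Usage counters for gates that share a single CCGR enable field;
+ * imx_clk_gate2_shared2() only touches the hardware when the count
+ * rises from zero or drops back to zero.
+ */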
+static u32 share_count_sai1;
+static u32 share_count_sai2;
+static u32 share_count_sai3;
+static u32 share_count_sai4;
+static u32 share_count_sai5;
+static u32 share_count_sai6;
+static u32 share_count_dcss;
+static u32 share_count_pdm;
+static u32 share_count_nand;
+
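+/*
+ * Rate table entries for the Samsung PLL14xx blocks used on i.MX8M:
+ * fout = (mdiv + kdiv / 65536) * fin / (pdiv * 2^sdiv), with kdiv fixed
+ * to 0 for the integer-only PLL1416x type
+ * (e.g. 225 * 24 MHz / (3 * 2^0) = 1800 MHz).
+ */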
+#define PLL_1416X_RATE(_rate, _m, _p, _s) \
+ { \
+ .rate = (_rate), \
+ .mdiv = (_m), \
+ .pdiv = (_p), \
+ .sdiv = (_s), \
+ }
+
+#define PLL_1443X_RATE(_rate, _m, _p, _s, _k) \
+ { \
+ .rate = (_rate), \
+ .mdiv = (_m), \
+ .pdiv = (_p), \
+ .sdiv = (_s), \
+ .kdiv = (_k), \
+ }
+
+static const struct imx_pll14xx_rate_table imx8mm_pll1416x_tbl[] = {
+ PLL_1416X_RATE(1800000000U, 225, 3, 0),
+ PLL_1416X_RATE(1600000000U, 200, 3, 0),
+ PLL_1416X_RATE(1200000000U, 300, 3, 1),
+ PLL_1416X_RATE(1000000000U, 250, 3, 1),
+ PLL_1416X_RATE(800000000U, 200, 3, 1),
+ PLL_1416X_RATE(750000000U, 250, 2, 2),
+ PLL_1416X_RATE(700000000U, 350, 3, 2),
+ PLL_1416X_RATE(600000000U, 300, 3, 2),
+};
+
+static const struct imx_pll14xx_rate_table imx8mm_audiopll_tbl[] = {
+ PLL_1443X_RATE(786432000U, 655, 5, 2, 23593),
+ PLL_1443X_RATE(722534400U, 301, 5, 1, 3670),
+};
+
+static const struct imx_pll14xx_rate_table imx8mm_videopll_tbl[] = {
+ PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
+ PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
+};
+
+static const struct imx_pll14xx_rate_table imx8mm_drampll_tbl[] = {
+ PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
+};
+
+static struct imx_pll14xx_clk imx8mm_audio_pll __initdata = {
+ .type = PLL_1443X,
+ .rate_table = imx8mm_audiopll_tbl,
+ .rate_count = ARRAY_SIZE(imx8mm_audiopll_tbl),
+};
+
+static struct imx_pll14xx_clk imx8mm_video_pll __initdata = {
+ .type = PLL_1443X,
+ .rate_table = imx8mm_videopll_tbl,
+ .rate_count = ARRAY_SIZE(imx8mm_videopll_tbl),
+};
+
+static struct imx_pll14xx_clk imx8mm_dram_pll __initdata = {
+ .type = PLL_1443X,
+ .rate_table = imx8mm_drampll_tbl,
+ .rate_count = ARRAY_SIZE(imx8mm_drampll_tbl),
+};
+
+static struct imx_pll14xx_clk imx8mm_arm_pll __initdata = {
+ .type = PLL_1416X,
+ .rate_table = imx8mm_pll1416x_tbl,
+ .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl),
+};
+
+static struct imx_pll14xx_clk imx8mm_gpu_pll __initdata = {
+ .type = PLL_1416X,
+ .rate_table = imx8mm_pll1416x_tbl,
+ .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl),
+};
+
+static struct imx_pll14xx_clk imx8mm_vpu_pll __initdata = {
+ .type = PLL_1416X,
+ .rate_table = imx8mm_pll1416x_tbl,
+ .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl),
+};
+
+static struct imx_pll14xx_clk imx8mm_sys_pll __initdata = {
+ .type = PLL_1416X,
+ .rate_table = imx8mm_pll1416x_tbl,
+ .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl),
+};
+
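+/*
+ * Parent names must match the clock names registered below; only the
+ * 24 MHz oscillator is a usable PLL reference here, the "dummy" entries
+ * are placeholders for unused mux inputs.
+ */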
+static const char *pll_ref_sels[] = { "osc_24m", "dummy", "dummy", "dummy", };
+static const char *audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", };
+static const char *audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", };
+static const char *video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", };
+static const char *dram_pll_bypass_sels[] = {"dram_pll", "dram_pll_ref_sel", };
+static const char *gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", };
+static const char *vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", };
+static const char *arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", };
+static const char *sys_pll1_bypass_sels[] = {"sys_pll1", "sys_pll1_ref_sel", };
+static const char *sys_pll2_bypass_sels[] = {"sys_pll2", "sys_pll2_ref_sel", };
+static const char *sys_pll3_bypass_sels[] = {"sys_pll3", "sys_pll3_ref_sel", };
+
+/* CCM ROOT */
+static const char *imx8mm_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pll2_500m", "sys_pll2_1000m",
+ "sys_pll1_800m", "sys_pll1_400m", "audio_pll1_out", "sys_pll3_out", };
+
+static const char *imx8mm_m4_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m", "sys_pll1_266m",
+ "sys_pll1_800m", "audio_pll1_out", "video_pll1_out", "sys_pll3_out", };
+
+static const char *imx8mm_vpu_sels[] = {"osc_24m", "arm_pll_out", "sys_pll2_500m", "sys_pll2_1000m",
+ "sys_pll1_800m", "sys_pll1_400m", "audio_pll1_out", "vpu_pll_out", };
+
+static const char *imx8mm_gpu3d_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll2_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
+
+static const char *imx8mm_gpu2d_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll2_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
+
+static const char *imx8mm_main_axi_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll1_800m", "sys_pll2_250m",
+ "sys_pll2_1000m", "audio_pll1_out", "video_pll1_out", "sys_pll1_100m",};
+
+static const char *imx8mm_enet_axi_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll1_800m", "sys_pll2_250m",
+ "sys_pll2_200m", "audio_pll1_out", "video_pll1_out", "sys_pll3_out", };
+
+static const char *imx8mm_nand_usdhc_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll1_800m", "sys_pll2_200m",
+ "sys_pll1_133m", "sys_pll3_out", "sys_pll2_250m", "audio_pll1_out", };
+
+static const char *imx8mm_vpu_bus_sels[] = {"osc_24m", "sys_pll1_800m", "vpu_pll_out", "audio_pll2_out",
+ "sys_pll3_out", "sys_pll2_1000m", "sys_pll2_200m", "sys_pll1_100m", };
+
+static const char *imx8mm_disp_axi_sels[] = {"osc_24m", "sys_pll2_1000m", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll1_40m", "audio_pll2_out", "clk_ext1", "clk_ext4", };
+
+static const char *imx8mm_disp_apb_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll1_40m", "audio_pll2_out", "clk_ext1", "clk_ext3", };
+
+static const char *imx8mm_disp_rtrm_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll2_200m", "sys_pll2_1000m",
+ "audio_pll1_out", "video_pll1_out", "clk_ext2", "clk_ext3", };
+
+static const char *imx8mm_usb_bus_sels[] = {"osc_24m", "sys_pll2_500m", "sys_pll1_800m", "sys_pll2_100m",
+ "sys_pll2_200m", "clk_ext2", "clk_ext4", "audio_pll2_out", };
+
+static const char *imx8mm_gpu_axi_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out", "sys_pll3_out", "sys_pll2_1000m",
+ "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
+
+static const char *imx8mm_gpu_ahb_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out", "sys_pll3_out", "sys_pll2_1000m",
+ "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
+
+static const char *imx8mm_noc_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll3_out", "sys_pll2_1000m", "sys_pll2_500m",
+ "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
+
+static const char *imx8mm_noc_apb_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll3_out", "sys_pll2_333m", "sys_pll2_200m",
+ "sys_pll1_800m", "audio_pll1_out", "video_pll1_out", };
+
+static const char *imx8mm_ahb_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_800m", "sys_pll1_400m",
+ "sys_pll2_125m", "sys_pll3_out", "audio_pll1_out", "video_pll1_out", };
+
+static const char *imx8mm_audio_ahb_sels[] = {"osc_24m", "sys_pll2_500m", "sys_pll1_800m", "sys_pll2_1000m",
+ "sys_pll2_166m", "sys_pll3_out", "audio_pll1_out", "video_pll1_out", };
+
+static const char *imx8mm_dram_alt_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll1_100m", "sys_pll2_500m",
+ "sys_pll2_1000m", "sys_pll3_out", "audio_pll1_out", "sys_pll1_266m", };
+
+static const char *imx8mm_dram_apb_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m", "sys_pll1_160m",
+ "sys_pll1_800m", "sys_pll3_out", "sys_pll2_250m", "audio_pll2_out", };
+
+static const char *imx8mm_vpu_g1_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m", "sys_pll2_1000m",
+ "sys_pll1_100m", "sys_pll2_125m", "sys_pll3_out", "audio_pll1_out", };
+
+static const char *imx8mm_vpu_g2_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m", "sys_pll2_1000m",
+ "sys_pll1_100m", "sys_pll2_125m", "sys_pll3_out", "audio_pll1_out", };
+
+static const char *imx8mm_disp_dtrc_sels[] = {"osc_24m", "video_pll2_out", "sys_pll1_800m", "sys_pll2_1000m",
+ "sys_pll1_160m", "video_pll1_out", "sys_pll3_out", "audio_pll2_out", };
+
+static const char *imx8mm_disp_dc8000_sels[] = {"osc_24m", "video_pll2_out", "sys_pll1_800m", "sys_pll2_1000m",
+ "sys_pll1_160m", "video_pll1_out", "sys_pll3_out", "audio_pll2_out", };
+
+static const char *imx8mm_pcie1_ctrl_sels[] = {"osc_24m", "sys_pll2_250m", "sys_pll2_200m", "sys_pll1_266m",
+ "sys_pll1_800m", "sys_pll2_500m", "sys_pll2_333m", "sys_pll3_out", };
+
+static const char *imx8mm_pcie1_phy_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll2_500m", "clk_ext1", "clk_ext2",
+ "clk_ext3", "clk_ext4", "sys_pll1_400m", };
+
+static const char *imx8mm_pcie1_aux_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_50m", "sys_pll3_out",
+ "sys_pll2_100m", "sys_pll1_80m", "sys_pll1_160m", "sys_pll1_200m", };
+
+static const char *imx8mm_dc_pixel_sels[] = {"osc_24m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out",
+ "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out", "clk_ext4", };
+
+static const char *imx8mm_lcdif_pixel_sels[] = {"osc_24m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out",
+ "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out", "clk_ext4", };
+
+static const char *imx8mm_sai1_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out",
+ "sys_pll1_133m", "osc_hdmi", "clk_ext1", "clk_ext2", };
+
+static const char *imx8mm_sai2_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out",
+ "sys_pll1_133m", "osc_hdmi", "clk_ext2", "clk_ext3", };
+
+static const char *imx8mm_sai3_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out",
+ "sys_pll1_133m", "osc_hdmi", "clk_ext3", "clk_ext4", };
+
+static const char *imx8mm_sai4_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out",
+ "sys_pll1_133m", "osc_hdmi", "clk_ext1", "clk_ext2", };
+
+static const char *imx8mm_sai5_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out",
+ "sys_pll1_133m", "osc_hdmi", "clk_ext2", "clk_ext3", };
+
+static const char *imx8mm_sai6_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out",
+ "sys_pll1_133m", "osc_hdmi", "clk_ext3", "clk_ext4", };
+
+static const char *imx8mm_spdif1_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out",
+ "sys_pll1_133m", "osc_hdmi", "clk_ext2", "clk_ext3", };
+
+static const char *imx8mm_spdif2_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out",
+ "sys_pll1_133m", "osc_hdmi", "clk_ext3", "clk_ext4", };
+
+static const char *imx8mm_enet_ref_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_50m", "sys_pll2_100m",
+ "sys_pll1_160m", "audio_pll1_out", "video_pll1_out", "clk_ext4", };
+
+static const char *imx8mm_enet_timer_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_out", "clk_ext1", "clk_ext2",
+ "clk_ext3", "clk_ext4", "video_pll1_out", };
+
+static const char *imx8mm_enet_phy_sels[] = {"osc_24m", "sys_pll2_50m", "sys_pll2_125m", "sys_pll2_200m",
+ "sys_pll2_500m", "video_pll1_out", "audio_pll2_out", };
+
+static const char *imx8mm_nand_sels[] = {"osc_24m", "sys_pll2_500m", "audio_pll1_out", "sys_pll1_400m",
+ "audio_pll2_out", "sys_pll3_out", "sys_pll2_250m", "video_pll1_out", };
+
+static const char *imx8mm_qspi_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m",
+ "audio_pll2_out", "sys_pll1_266m", "sys_pll3_out", "sys_pll1_100m", };
+
+static const char *imx8mm_usdhc1_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m",
+ "sys_pll3_out", "sys_pll1_266m", "audio_pll2_out", "sys_pll1_100m", };
+
+static const char *imx8mm_usdhc2_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m",
+ "sys_pll3_out", "sys_pll1_266m", "audio_pll2_out", "sys_pll1_100m", };
+
+static const char *imx8mm_i2c1_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m", "sys_pll3_out", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", "sys_pll1_133m", };
+
+static const char *imx8mm_i2c2_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m", "sys_pll3_out", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", "sys_pll1_133m", };
+
+static const char *imx8mm_i2c3_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m", "sys_pll3_out", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", "sys_pll1_133m", };
+
+static const char *imx8mm_i2c4_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m", "sys_pll3_out", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", "sys_pll1_133m", };
+
+static const char *imx8mm_uart1_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m", "sys_pll2_100m",
+ "sys_pll3_out", "clk_ext2", "clk_ext4", "audio_pll2_out", };
+
+static const char *imx8mm_uart2_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m", "sys_pll2_100m",
+ "sys_pll3_out", "clk_ext2", "clk_ext3", "audio_pll2_out", };
+
+static const char *imx8mm_uart3_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m", "sys_pll2_100m",
+ "sys_pll3_out", "clk_ext2", "clk_ext4", "audio_pll2_out", };
+
+static const char *imx8mm_uart4_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m", "sys_pll2_100m",
+ "sys_pll3_out", "clk_ext2", "clk_ext3", "audio_pll2_out", };
+
+static const char *imx8mm_usb_core_sels[] = {"osc_24m", "sys_pll1_100m", "sys_pll1_40m", "sys_pll2_100m",
+ "sys_pll2_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", };
+
+static const char *imx8mm_usb_phy_sels[] = {"osc_24m", "sys_pll1_100m", "sys_pll1_40m", "sys_pll2_100m",
+ "sys_pll2_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", };
+
+static const char *imx8mm_ecspi1_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m", "sys_pll1_160m",
+ "sys_pll1_800m", "sys_pll3_out", "sys_pll2_250m", "audio_pll2_out", };
+
+static const char *imx8mm_ecspi2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m", "sys_pll1_160m",
+ "sys_pll1_800m", "sys_pll3_out", "sys_pll2_250m", "audio_pll2_out", };
+
+static const char *imx8mm_pwm1_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m", "sys_pll1_40m",
+ "sys_pll3_out", "clk_ext1", "sys_pll1_80m", "video_pll1_out", };
+
+static const char *imx8mm_pwm2_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m", "sys_pll1_40m",
+ "sys_pll3_out", "clk_ext1", "sys_pll1_80m", "video_pll1_out", };
+
+static const char *imx8mm_pwm3_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m", "sys_pll1_40m",
+ "sys3_pll2_out", "clk_ext2", "sys_pll1_80m", "video_pll1_out", };
+
+static const char *imx8mm_pwm4_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m", "sys_pll1_40m",
+ "sys_pll3_out", "clk_ext2", "sys_pll1_80m", "video_pll1_out", };
+
+static const char *imx8mm_gpt1_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m", "sys_pll1_40m",
+ "video_pll1_out", "sys_pll1_800m", "audio_pll1_out", "clk_ext1" };
+
+static const char *imx8mm_wdog_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_160m", "vpu_pll_out",
+ "sys_pll2_125m", "sys_pll3_out", "sys_pll1_80m", "sys_pll2_166m", };
+
+static const char *imx8mm_wrclk_sels[] = {"osc_24m", "sys_pll1_40m", "vpu_pll_out", "sys_pll3_out", "sys_pll2_200m",
+ "sys_pll1_266m", "sys_pll2_500m", "sys_pll1_100m", };
+
+static const char *imx8mm_dsi_core_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", };
+
+static const char *imx8mm_dsi_phy_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_100m", "sys_pll1_800m",
+ "sys_pll2_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", };
+
+static const char *imx8mm_dsi_dbi_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_100m", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", };
+
+static const char *imx8mm_usdhc3_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m",
+ "sys_pll3_out", "sys_pll1_266m", "audio_pll2_clk", "sys_pll1_100m", };
+
+static const char *imx8mm_csi1_core_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", };
+
+static const char *imx8mm_csi1_phy_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll2_100m", "sys_pll1_800m",
+ "sys_pll2_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", };
+
+static const char *imx8mm_csi1_esc_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_80m", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll3_out", "clk_ext3", "audio_pll2_out", };
+
+static const char *imx8mm_csi2_core_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", };
+
+static const char *imx8mm_csi2_phy_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll2_100m", "sys_pll1_800m",
+ "sys_pll2_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", };
+
+static const char *imx8mm_csi2_esc_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_80m", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll3_out", "clk_ext3", "audio_pll2_out", };
+
+static const char *imx8mm_pcie2_ctrl_sels[] = {"osc_24m", "sys_pll2_250m", "sys_pll2_200m", "sys_pll1_266m",
+ "sys_pll1_800m", "sys_pll2_500m", "sys_pll2_333m", "sys_pll3_out", };
+
+static const char *imx8mm_pcie2_phy_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll2_500m", "clk_ext1",
+ "clk_ext2", "clk_ext3", "clk_ext4", "sys_pll1_400m", };
+
+static const char *imx8mm_pcie2_aux_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_50m", "sys_pll3_out",
+ "sys_pll2_100m", "sys_pll1_80m", "sys_pll1_160m", "sys_pll1_200m", };
+
+static const char *imx8mm_ecspi3_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m", "sys_pll1_160m",
+ "sys_pll1_800m", "sys_pll3_out", "sys_pll2_250m", "audio_pll2_out", };
+
+static const char *imx8mm_pdm_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_out", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll3_out", "clk_ext3", "audio_pll2_out", };
+
+static const char *imx8mm_vpu_h1_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m", "sys_pll2_1000m",
+ "audio_pll2_clk", "sys_pll2_125m", "sys_pll3_clk", "audio_pll1_out", };
+
+static const char *imx8mm_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", };
+
+static const char *imx8mm_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m", "sys_pll1_200m", "audio_pll2_out",
+ "vpu_pll", "sys_pll1_80m", };
+
+static struct clk *clks[IMX8MM_CLK_END];
+static struct clk_onecell_data clk_data;
+
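+/*
+ * UART root clocks handed to imx_register_uart_clocks() so that an early
+ * serial console stays clocked until the real UART driver takes over.
+ */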
+static struct clk ** const uart_clks[] __initconst = {
+ &clks[IMX8MM_CLK_UART1_ROOT],
+ &clks[IMX8MM_CLK_UART2_ROOT],
+ &clks[IMX8MM_CLK_UART3_ROOT],
+ &clks[IMX8MM_CLK_UART4_ROOT],
+ NULL
+};
+
+static int __init imx8mm_clocks_init(struct device_node *ccm_node)
+{
+ struct device_node *np;
+ void __iomem *base;
+ int ret;
+
+ clks[IMX8MM_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
+ clks[IMX8MM_CLK_24M] = of_clk_get_by_name(ccm_node, "osc_24m");
+ clks[IMX8MM_CLK_32K] = of_clk_get_by_name(ccm_node, "osc_32k");
+ clks[IMX8MM_CLK_EXT1] = of_clk_get_by_name(ccm_node, "clk_ext1");
+ clks[IMX8MM_CLK_EXT2] = of_clk_get_by_name(ccm_node, "clk_ext2");
+ clks[IMX8MM_CLK_EXT3] = of_clk_get_by_name(ccm_node, "clk_ext3");
+ clks[IMX8MM_CLK_EXT4] = of_clk_get_by_name(ccm_node, "clk_ext4");
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop");
+ base = of_iomap(np, 0);
+ if (WARN_ON(!base))
+ return -ENOMEM;
+
+ clks[IMX8MM_AUDIO_PLL1_REF_SEL] = imx_clk_mux("audio_pll1_ref_sel", base + 0x0, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MM_AUDIO_PLL2_REF_SEL] = imx_clk_mux("audio_pll2_ref_sel", base + 0x14, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MM_VIDEO_PLL1_REF_SEL] = imx_clk_mux("video_pll1_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MM_DRAM_PLL_REF_SEL] = imx_clk_mux("dram_pll_ref_sel", base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MM_GPU_PLL_REF_SEL] = imx_clk_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MM_VPU_PLL_REF_SEL] = imx_clk_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MM_ARM_PLL_REF_SEL] = imx_clk_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MM_SYS_PLL1_REF_SEL] = imx_clk_mux("sys_pll1_ref_sel", base + 0x94, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MM_SYS_PLL2_REF_SEL] = imx_clk_mux("sys_pll2_ref_sel", base + 0x104, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MM_SYS_PLL3_REF_SEL] = imx_clk_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+
+ clks[IMX8MM_AUDIO_PLL1] = imx_clk_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx8mm_audio_pll);
+ clks[IMX8MM_AUDIO_PLL2] = imx_clk_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx8mm_audio_pll);
+ clks[IMX8MM_VIDEO_PLL1] = imx_clk_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx8mm_video_pll);
+ clks[IMX8MM_DRAM_PLL] = imx_clk_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx8mm_dram_pll);
+ clks[IMX8MM_GPU_PLL] = imx_clk_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx8mm_gpu_pll);
+ clks[IMX8MM_VPU_PLL] = imx_clk_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx8mm_vpu_pll);
+ clks[IMX8MM_ARM_PLL] = imx_clk_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx8mm_arm_pll);
+ clks[IMX8MM_SYS_PLL1] = imx_clk_pll14xx("sys_pll1", "sys_pll1_ref_sel", base + 0x94, &imx8mm_sys_pll);
+ clks[IMX8MM_SYS_PLL2] = imx_clk_pll14xx("sys_pll2", "sys_pll2_ref_sel", base + 0x104, &imx8mm_sys_pll);
+ clks[IMX8MM_SYS_PLL3] = imx_clk_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx8mm_sys_pll);
+
+ /* PLL bypass out */
+ clks[IMX8MM_AUDIO_PLL1_BYPASS] = imx_clk_mux_flags("audio_pll1_bypass", base, 4, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MM_AUDIO_PLL2_BYPASS] = imx_clk_mux_flags("audio_pll2_bypass", base + 0x14, 4, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MM_VIDEO_PLL1_BYPASS] = imx_clk_mux_flags("video_pll1_bypass", base + 0x28, 4, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MM_DRAM_PLL_BYPASS] = imx_clk_mux_flags("dram_pll_bypass", base + 0x50, 4, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MM_GPU_PLL_BYPASS] = imx_clk_mux_flags("gpu_pll_bypass", base + 0x64, 4, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MM_VPU_PLL_BYPASS] = imx_clk_mux_flags("vpu_pll_bypass", base + 0x74, 4, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MM_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x84, 4, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MM_SYS_PLL1_BYPASS] = imx_clk_mux_flags("sys_pll1_bypass", base + 0x94, 4, 1, sys_pll1_bypass_sels, ARRAY_SIZE(sys_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MM_SYS_PLL2_BYPASS] = imx_clk_mux_flags("sys_pll2_bypass", base + 0x104, 4, 1, sys_pll2_bypass_sels, ARRAY_SIZE(sys_pll2_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MM_SYS_PLL3_BYPASS] = imx_clk_mux_flags("sys_pll3_bypass", base + 0x114, 4, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
+
+ /* unbypass all the plls */
+ clk_set_parent(clks[IMX8MM_AUDIO_PLL1_BYPASS], clks[IMX8MM_AUDIO_PLL1]);
+ clk_set_parent(clks[IMX8MM_AUDIO_PLL2_BYPASS], clks[IMX8MM_AUDIO_PLL2]);
+ clk_set_parent(clks[IMX8MM_VIDEO_PLL1_BYPASS], clks[IMX8MM_VIDEO_PLL1]);
+ clk_set_parent(clks[IMX8MM_DRAM_PLL_BYPASS], clks[IMX8MM_DRAM_PLL]);
+ clk_set_parent(clks[IMX8MM_GPU_PLL_BYPASS], clks[IMX8MM_GPU_PLL]);
+ clk_set_parent(clks[IMX8MM_VPU_PLL_BYPASS], clks[IMX8MM_VPU_PLL]);
+ clk_set_parent(clks[IMX8MM_ARM_PLL_BYPASS], clks[IMX8MM_ARM_PLL]);
+ clk_set_parent(clks[IMX8MM_SYS_PLL1_BYPASS], clks[IMX8MM_SYS_PLL1]);
+ clk_set_parent(clks[IMX8MM_SYS_PLL2_BYPASS], clks[IMX8MM_SYS_PLL2]);
+ clk_set_parent(clks[IMX8MM_SYS_PLL3_BYPASS], clks[IMX8MM_SYS_PLL3]);
+
+ /* PLL out gate */
+ clks[IMX8MM_AUDIO_PLL1_OUT] = imx_clk_gate("audio_pll1_out", "audio_pll1_bypass", base, 13);
+ clks[IMX8MM_AUDIO_PLL2_OUT] = imx_clk_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x14, 13);
+ clks[IMX8MM_VIDEO_PLL1_OUT] = imx_clk_gate("video_pll1_out", "video_pll1_bypass", base + 0x28, 13);
+ clks[IMX8MM_DRAM_PLL_OUT] = imx_clk_gate("dram_pll_out", "dram_pll_bypass", base + 0x50, 13);
+ clks[IMX8MM_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 13);
+ clks[IMX8MM_VPU_PLL_OUT] = imx_clk_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 13);
+ clks[IMX8MM_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 13);
+ clks[IMX8MM_SYS_PLL1_OUT] = imx_clk_gate("sys_pll1_out", "sys_pll1_bypass", base + 0x94, 13);
+ clks[IMX8MM_SYS_PLL2_OUT] = imx_clk_gate("sys_pll2_out", "sys_pll2_bypass", base + 0x104, 13);
+ clks[IMX8MM_SYS_PLL3_OUT] = imx_clk_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 13);
+
+ /* SYS PLL fixed output */
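+ /* sys_pll1 runs at 800 MHz and sys_pll2 at 1000 MHz, so the fixed
+ dividers below yield the rates encoded in the clock names. */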
+ clks[IMX8MM_SYS_PLL1_40M] = imx_clk_fixed_factor("sys_pll1_40m", "sys_pll1_out", 1, 20);
+ clks[IMX8MM_SYS_PLL1_80M] = imx_clk_fixed_factor("sys_pll1_80m", "sys_pll1_out", 1, 10);
+ clks[IMX8MM_SYS_PLL1_100M] = imx_clk_fixed_factor("sys_pll1_100m", "sys_pll1_out", 1, 8);
+ clks[IMX8MM_SYS_PLL1_133M] = imx_clk_fixed_factor("sys_pll1_133m", "sys_pll1_out", 1, 6);
+ clks[IMX8MM_SYS_PLL1_160M] = imx_clk_fixed_factor("sys_pll1_160m", "sys_pll1_out", 1, 5);
+ clks[IMX8MM_SYS_PLL1_200M] = imx_clk_fixed_factor("sys_pll1_200m", "sys_pll1_out", 1, 4);
+ clks[IMX8MM_SYS_PLL1_266M] = imx_clk_fixed_factor("sys_pll1_266m", "sys_pll1_out", 1, 3);
+ clks[IMX8MM_SYS_PLL1_400M] = imx_clk_fixed_factor("sys_pll1_400m", "sys_pll1_out", 1, 2);
+ clks[IMX8MM_SYS_PLL1_800M] = imx_clk_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
+
+ clks[IMX8MM_SYS_PLL2_50M] = imx_clk_fixed_factor("sys_pll2_50m", "sys_pll2_out", 1, 20);
+ clks[IMX8MM_SYS_PLL2_100M] = imx_clk_fixed_factor("sys_pll2_100m", "sys_pll2_out", 1, 10);
+ clks[IMX8MM_SYS_PLL2_125M] = imx_clk_fixed_factor("sys_pll2_125m", "sys_pll2_out", 1, 8);
+ clks[IMX8MM_SYS_PLL2_166M] = imx_clk_fixed_factor("sys_pll2_166m", "sys_pll2_out", 1, 6);
+ clks[IMX8MM_SYS_PLL2_200M] = imx_clk_fixed_factor("sys_pll2_200m", "sys_pll2_out", 1, 5);
+ clks[IMX8MM_SYS_PLL2_250M] = imx_clk_fixed_factor("sys_pll2_250m", "sys_pll2_out", 1, 4);
+ clks[IMX8MM_SYS_PLL2_333M] = imx_clk_fixed_factor("sys_pll2_333m", "sys_pll2_out", 1, 3);
+ clks[IMX8MM_SYS_PLL2_500M] = imx_clk_fixed_factor("sys_pll2_500m", "sys_pll2_out", 1, 2);
+ clks[IMX8MM_SYS_PLL2_1000M] = imx_clk_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
+
+ np = ccm_node;
+ base = of_iomap(np, 0);
+ if (WARN_ON(!base))
+ return -ENOMEM;
+
+ /* Core Slice */
+ clks[IMX8MM_CLK_A53_SRC] = imx_clk_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mm_a53_sels, ARRAY_SIZE(imx8mm_a53_sels));
+ clks[IMX8MM_CLK_M4_SRC] = imx_clk_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mm_m4_sels, ARRAY_SIZE(imx8mm_m4_sels));
+ clks[IMX8MM_CLK_VPU_SRC] = imx_clk_mux2("vpu_src", base + 0x8100, 24, 3, imx8mm_vpu_sels, ARRAY_SIZE(imx8mm_vpu_sels));
+ clks[IMX8MM_CLK_GPU3D_SRC] = imx_clk_mux2("gpu3d_src", base + 0x8180, 24, 3, imx8mm_gpu3d_sels, ARRAY_SIZE(imx8mm_gpu3d_sels));
+ clks[IMX8MM_CLK_GPU2D_SRC] = imx_clk_mux2("gpu2d_src", base + 0x8200, 24, 3, imx8mm_gpu2d_sels, ARRAY_SIZE(imx8mm_gpu2d_sels));
+ clks[IMX8MM_CLK_A53_CG] = imx_clk_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28);
+ clks[IMX8MM_CLK_M4_CG] = imx_clk_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
+ clks[IMX8MM_CLK_VPU_CG] = imx_clk_gate3("vpu_cg", "vpu_src", base + 0x8100, 28);
+ clks[IMX8MM_CLK_GPU3D_CG] = imx_clk_gate3("gpu3d_cg", "gpu3d_src", base + 0x8180, 28);
+ clks[IMX8MM_CLK_GPU2D_CG] = imx_clk_gate3("gpu2d_cg", "gpu2d_src", base + 0x8200, 28);
+ clks[IMX8MM_CLK_A53_DIV] = imx_clk_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
+ clks[IMX8MM_CLK_M4_DIV] = imx_clk_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
+ clks[IMX8MM_CLK_VPU_DIV] = imx_clk_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3);
+ clks[IMX8MM_CLK_GPU3D_DIV] = imx_clk_divider2("gpu3d_div", "gpu3d_cg", base + 0x8180, 0, 3);
+ clks[IMX8MM_CLK_GPU2D_DIV] = imx_clk_divider2("gpu2d_div", "gpu2d_cg", base + 0x8200, 0, 3);
+
+ /* BUS */
+ clks[IMX8MM_CLK_MAIN_AXI] = imx8m_clk_composite_critical("main_axi", imx8mm_main_axi_sels, base + 0x8800);
+ clks[IMX8MM_CLK_ENET_AXI] = imx8m_clk_composite("enet_axi", imx8mm_enet_axi_sels, base + 0x8880);
+ clks[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_composite_critical("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900);
+ clks[IMX8MM_CLK_VPU_BUS] = imx8m_clk_composite("vpu_bus", imx8mm_vpu_bus_sels, base + 0x8980);
+ clks[IMX8MM_CLK_DISP_AXI] = imx8m_clk_composite("disp_axi", imx8mm_disp_axi_sels, base + 0x8a00);
+ clks[IMX8MM_CLK_DISP_APB] = imx8m_clk_composite("disp_apb", imx8mm_disp_apb_sels, base + 0x8a80);
+ clks[IMX8MM_CLK_DISP_RTRM] = imx8m_clk_composite("disp_rtrm", imx8mm_disp_rtrm_sels, base + 0x8b00);
+ clks[IMX8MM_CLK_USB_BUS] = imx8m_clk_composite("usb_bus", imx8mm_usb_bus_sels, base + 0x8b80);
+ clks[IMX8MM_CLK_GPU_AXI] = imx8m_clk_composite("gpu_axi", imx8mm_gpu_axi_sels, base + 0x8c00);
+ clks[IMX8MM_CLK_GPU_AHB] = imx8m_clk_composite("gpu_ahb", imx8mm_gpu_ahb_sels, base + 0x8c80);
+ clks[IMX8MM_CLK_NOC] = imx8m_clk_composite_critical("noc", imx8mm_noc_sels, base + 0x8d00);
+ clks[IMX8MM_CLK_NOC_APB] = imx8m_clk_composite_critical("noc_apb", imx8mm_noc_apb_sels, base + 0x8d80);
+
+ /* AHB */
+ clks[IMX8MM_CLK_AHB] = imx8m_clk_composite_critical("ahb", imx8mm_ahb_sels, base + 0x9000);
+ clks[IMX8MM_CLK_AUDIO_AHB] = imx8m_clk_composite("audio_ahb", imx8mm_audio_ahb_sels, base + 0x9100);
+
+ /* IPG */
+ clks[IMX8MM_CLK_IPG_ROOT] = imx_clk_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
+ clks[IMX8MM_CLK_IPG_AUDIO_ROOT] = imx_clk_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
+
+ /* IP */
+ clks[IMX8MM_CLK_DRAM_ALT] = imx8m_clk_composite("dram_alt", imx8mm_dram_alt_sels, base + 0xa000);
+ clks[IMX8MM_CLK_DRAM_APB] = imx8m_clk_composite("dram_apb", imx8mm_dram_apb_sels, base + 0xa080);
+ clks[IMX8MM_CLK_VPU_G1] = imx8m_clk_composite("vpu_g1", imx8mm_vpu_g1_sels, base + 0xa100);
+ clks[IMX8MM_CLK_VPU_G2] = imx8m_clk_composite("vpu_g2", imx8mm_vpu_g2_sels, base + 0xa180);
+ clks[IMX8MM_CLK_DISP_DTRC] = imx8m_clk_composite("disp_dtrc", imx8mm_disp_dtrc_sels, base + 0xa200);
+ clks[IMX8MM_CLK_DISP_DC8000] = imx8m_clk_composite("disp_dc8000", imx8mm_disp_dc8000_sels, base + 0xa280);
+ clks[IMX8MM_CLK_PCIE1_CTRL] = imx8m_clk_composite("pcie1_ctrl", imx8mm_pcie1_ctrl_sels, base + 0xa300);
+ clks[IMX8MM_CLK_PCIE1_PHY] = imx8m_clk_composite("pcie1_phy", imx8mm_pcie1_phy_sels, base + 0xa380);
+ clks[IMX8MM_CLK_PCIE1_AUX] = imx8m_clk_composite("pcie1_aux", imx8mm_pcie1_aux_sels, base + 0xa400);
+ clks[IMX8MM_CLK_DC_PIXEL] = imx8m_clk_composite("dc_pixel", imx8mm_dc_pixel_sels, base + 0xa480);
+ clks[IMX8MM_CLK_LCDIF_PIXEL] = imx8m_clk_composite("lcdif_pixel", imx8mm_lcdif_pixel_sels, base + 0xa500);
+ clks[IMX8MM_CLK_SAI1] = imx8m_clk_composite("sai1", imx8mm_sai1_sels, base + 0xa580);
+ clks[IMX8MM_CLK_SAI2] = imx8m_clk_composite("sai2", imx8mm_sai2_sels, base + 0xa600);
+ clks[IMX8MM_CLK_SAI3] = imx8m_clk_composite("sai3", imx8mm_sai3_sels, base + 0xa680);
+ clks[IMX8MM_CLK_SAI4] = imx8m_clk_composite("sai4", imx8mm_sai4_sels, base + 0xa700);
+ clks[IMX8MM_CLK_SAI5] = imx8m_clk_composite("sai5", imx8mm_sai5_sels, base + 0xa780);
+ clks[IMX8MM_CLK_SAI6] = imx8m_clk_composite("sai6", imx8mm_sai6_sels, base + 0xa800);
+ clks[IMX8MM_CLK_SPDIF1] = imx8m_clk_composite("spdif1", imx8mm_spdif1_sels, base + 0xa880);
+ clks[IMX8MM_CLK_SPDIF2] = imx8m_clk_composite("spdif2", imx8mm_spdif2_sels, base + 0xa900);
+ clks[IMX8MM_CLK_ENET_REF] = imx8m_clk_composite("enet_ref", imx8mm_enet_ref_sels, base + 0xa980);
+ clks[IMX8MM_CLK_ENET_TIMER] = imx8m_clk_composite("enet_timer", imx8mm_enet_timer_sels, base + 0xaa00);
+ clks[IMX8MM_CLK_ENET_PHY_REF] = imx8m_clk_composite("enet_phy", imx8mm_enet_phy_sels, base + 0xaa80);
+ clks[IMX8MM_CLK_NAND] = imx8m_clk_composite("nand", imx8mm_nand_sels, base + 0xab00);
+ clks[IMX8MM_CLK_QSPI] = imx8m_clk_composite("qspi", imx8mm_qspi_sels, base + 0xab80);
+ clks[IMX8MM_CLK_USDHC1] = imx8m_clk_composite("usdhc1", imx8mm_usdhc1_sels, base + 0xac00);
+ clks[IMX8MM_CLK_USDHC2] = imx8m_clk_composite("usdhc2", imx8mm_usdhc2_sels, base + 0xac80);
+ clks[IMX8MM_CLK_I2C1] = imx8m_clk_composite("i2c1", imx8mm_i2c1_sels, base + 0xad00);
+ clks[IMX8MM_CLK_I2C2] = imx8m_clk_composite("i2c2", imx8mm_i2c2_sels, base + 0xad80);
+ clks[IMX8MM_CLK_I2C3] = imx8m_clk_composite("i2c3", imx8mm_i2c3_sels, base + 0xae00);
+ clks[IMX8MM_CLK_I2C4] = imx8m_clk_composite("i2c4", imx8mm_i2c4_sels, base + 0xae80);
+ clks[IMX8MM_CLK_UART1] = imx8m_clk_composite("uart1", imx8mm_uart1_sels, base + 0xaf00);
+ clks[IMX8MM_CLK_UART2] = imx8m_clk_composite("uart2", imx8mm_uart2_sels, base + 0xaf80);
+ clks[IMX8MM_CLK_UART3] = imx8m_clk_composite("uart3", imx8mm_uart3_sels, base + 0xb000);
+ clks[IMX8MM_CLK_UART4] = imx8m_clk_composite("uart4", imx8mm_uart4_sels, base + 0xb080);
+ clks[IMX8MM_CLK_USB_CORE_REF] = imx8m_clk_composite("usb_core_ref", imx8mm_usb_core_sels, base + 0xb100);
+ clks[IMX8MM_CLK_USB_PHY_REF] = imx8m_clk_composite("usb_phy_ref", imx8mm_usb_phy_sels, base + 0xb180);
+ clks[IMX8MM_CLK_ECSPI1] = imx8m_clk_composite("ecspi1", imx8mm_ecspi1_sels, base + 0xb280);
+ clks[IMX8MM_CLK_ECSPI2] = imx8m_clk_composite("ecspi2", imx8mm_ecspi2_sels, base + 0xb300);
+ clks[IMX8MM_CLK_PWM1] = imx8m_clk_composite("pwm1", imx8mm_pwm1_sels, base + 0xb380);
+ clks[IMX8MM_CLK_PWM2] = imx8m_clk_composite("pwm2", imx8mm_pwm2_sels, base + 0xb400);
+ clks[IMX8MM_CLK_PWM3] = imx8m_clk_composite("pwm3", imx8mm_pwm3_sels, base + 0xb480);
+ clks[IMX8MM_CLK_PWM4] = imx8m_clk_composite("pwm4", imx8mm_pwm4_sels, base + 0xb500);
+ clks[IMX8MM_CLK_GPT1] = imx8m_clk_composite("gpt1", imx8mm_gpt1_sels, base + 0xb580);
+ clks[IMX8MM_CLK_WDOG] = imx8m_clk_composite("wdog", imx8mm_wdog_sels, base + 0xb900);
+ clks[IMX8MM_CLK_WRCLK] = imx8m_clk_composite("wrclk", imx8mm_wrclk_sels, base + 0xb980);
+ clks[IMX8MM_CLK_CLKO1] = imx8m_clk_composite("clko1", imx8mm_clko1_sels, base + 0xba00);
+ clks[IMX8MM_CLK_DSI_CORE] = imx8m_clk_composite("dsi_core", imx8mm_dsi_core_sels, base + 0xbb00);
+ clks[IMX8MM_CLK_DSI_PHY_REF] = imx8m_clk_composite("dsi_phy_ref", imx8mm_dsi_phy_sels, base + 0xbb80);
+ clks[IMX8MM_CLK_DSI_DBI] = imx8m_clk_composite("dsi_dbi", imx8mm_dsi_dbi_sels, base + 0xbc00);
+ clks[IMX8MM_CLK_USDHC3] = imx8m_clk_composite("usdhc3", imx8mm_usdhc3_sels, base + 0xbc80);
+ clks[IMX8MM_CLK_CSI1_CORE] = imx8m_clk_composite("csi1_core", imx8mm_csi1_core_sels, base + 0xbd00);
+ clks[IMX8MM_CLK_CSI1_PHY_REF] = imx8m_clk_composite("csi1_phy_ref", imx8mm_csi1_phy_sels, base + 0xbd80);
+ clks[IMX8MM_CLK_CSI1_ESC] = imx8m_clk_composite("csi1_esc", imx8mm_csi1_esc_sels, base + 0xbe00);
+ clks[IMX8MM_CLK_CSI2_CORE] = imx8m_clk_composite("csi2_core", imx8mm_csi2_core_sels, base + 0xbe80);
+ clks[IMX8MM_CLK_CSI2_PHY_REF] = imx8m_clk_composite("csi2_phy_ref", imx8mm_csi2_phy_sels, base + 0xbf00);
+ clks[IMX8MM_CLK_CSI2_ESC] = imx8m_clk_composite("csi2_esc", imx8mm_csi2_esc_sels, base + 0xbf80);
+ clks[IMX8MM_CLK_PCIE2_CTRL] = imx8m_clk_composite("pcie2_ctrl", imx8mm_pcie2_ctrl_sels, base + 0xc000);
+ clks[IMX8MM_CLK_PCIE2_PHY] = imx8m_clk_composite("pcie2_phy", imx8mm_pcie2_phy_sels, base + 0xc080);
+ clks[IMX8MM_CLK_PCIE2_AUX] = imx8m_clk_composite("pcie2_aux", imx8mm_pcie2_aux_sels, base + 0xc100);
+ clks[IMX8MM_CLK_ECSPI3] = imx8m_clk_composite("ecspi3", imx8mm_ecspi3_sels, base + 0xc180);
+ clks[IMX8MM_CLK_PDM] = imx8m_clk_composite("pdm", imx8mm_pdm_sels, base + 0xc200);
+ clks[IMX8MM_CLK_VPU_H1] = imx8m_clk_composite("vpu_h1", imx8mm_vpu_h1_sels, base + 0xc280);
+
+ /* CCGR */
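+ /* Peripheral gates in the CCGR block; the imx_clk_gate2_shared2()
+ entries let a root clock and its IPG/bus sibling share one enable
+ field, tracked through the share_count_* variables above. */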
+ clks[IMX8MM_CLK_ECSPI1_ROOT] = imx_clk_gate4("ecspi1_root_clk", "ecspi1", base + 0x4070, 0);
+ clks[IMX8MM_CLK_ECSPI2_ROOT] = imx_clk_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
+ clks[IMX8MM_CLK_ECSPI3_ROOT] = imx_clk_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
+ clks[IMX8MM_CLK_ENET1_ROOT] = imx_clk_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
+ clks[IMX8MM_CLK_GPT1_ROOT] = imx_clk_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0);
+ clks[IMX8MM_CLK_I2C1_ROOT] = imx_clk_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
+ clks[IMX8MM_CLK_I2C2_ROOT] = imx_clk_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
+ clks[IMX8MM_CLK_I2C3_ROOT] = imx_clk_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0);
+ clks[IMX8MM_CLK_I2C4_ROOT] = imx_clk_gate4("i2c4_root_clk", "i2c4", base + 0x41a0, 0);
+ clks[IMX8MM_CLK_MU_ROOT] = imx_clk_gate4("mu_root_clk", "ipg_root", base + 0x4210, 0);
+ clks[IMX8MM_CLK_OCOTP_ROOT] = imx_clk_gate4("ocotp_root_clk", "ipg_root", base + 0x4220, 0);
+ clks[IMX8MM_CLK_PCIE1_ROOT] = imx_clk_gate4("pcie1_root_clk", "pcie1_ctrl", base + 0x4250, 0);
+ clks[IMX8MM_CLK_PWM1_ROOT] = imx_clk_gate4("pwm1_root_clk", "pwm1", base + 0x4280, 0);
+ clks[IMX8MM_CLK_PWM2_ROOT] = imx_clk_gate4("pwm2_root_clk", "pwm2", base + 0x4290, 0);
+ clks[IMX8MM_CLK_PWM3_ROOT] = imx_clk_gate4("pwm3_root_clk", "pwm3", base + 0x42a0, 0);
+ clks[IMX8MM_CLK_PWM4_ROOT] = imx_clk_gate4("pwm4_root_clk", "pwm4", base + 0x42b0, 0);
+ clks[IMX8MM_CLK_QSPI_ROOT] = imx_clk_gate4("qspi_root_clk", "qspi", base + 0x42f0, 0);
+ clks[IMX8MM_CLK_NAND_ROOT] = imx_clk_gate2_shared2("nand_root_clk", "nand", base + 0x4300, 0, &share_count_nand);
+ clks[IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", base + 0x4300, 0, &share_count_nand);
+ clks[IMX8MM_CLK_SAI1_ROOT] = imx_clk_gate2_shared2("sai1_root_clk", "sai1", base + 0x4330, 0, &share_count_sai1);
+ clks[IMX8MM_CLK_SAI1_IPG] = imx_clk_gate2_shared2("sai1_ipg_clk", "ipg_audio_root", base + 0x4330, 0, &share_count_sai1);
+ clks[IMX8MM_CLK_SAI2_ROOT] = imx_clk_gate2_shared2("sai2_root_clk", "sai2", base + 0x4340, 0, &share_count_sai2);
+ clks[IMX8MM_CLK_SAI2_IPG] = imx_clk_gate2_shared2("sai2_ipg_clk", "ipg_audio_root", base + 0x4340, 0, &share_count_sai2);
+ clks[IMX8MM_CLK_SAI3_ROOT] = imx_clk_gate2_shared2("sai3_root_clk", "sai3", base + 0x4350, 0, &share_count_sai3);
+ clks[IMX8MM_CLK_SAI3_IPG] = imx_clk_gate2_shared2("sai3_ipg_clk", "ipg_audio_root", base + 0x4350, 0, &share_count_sai3);
+ clks[IMX8MM_CLK_SAI4_ROOT] = imx_clk_gate2_shared2("sai4_root_clk", "sai4", base + 0x4360, 0, &share_count_sai4);
+ clks[IMX8MM_CLK_SAI4_IPG] = imx_clk_gate2_shared2("sai4_ipg_clk", "ipg_audio_root", base + 0x4360, 0, &share_count_sai4);
+ clks[IMX8MM_CLK_SAI5_ROOT] = imx_clk_gate2_shared2("sai5_root_clk", "sai5", base + 0x4370, 0, &share_count_sai5);
+ clks[IMX8MM_CLK_SAI5_IPG] = imx_clk_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
+ clks[IMX8MM_CLK_SAI6_ROOT] = imx_clk_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
+ clks[IMX8MM_CLK_SAI6_IPG] = imx_clk_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
+ clks[IMX8MM_CLK_UART1_ROOT] = imx_clk_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
+ clks[IMX8MM_CLK_UART2_ROOT] = imx_clk_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
+ clks[IMX8MM_CLK_UART3_ROOT] = imx_clk_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
+ clks[IMX8MM_CLK_UART4_ROOT] = imx_clk_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
+ clks[IMX8MM_CLK_USB1_CTRL_ROOT] = imx_clk_gate4("usb1_ctrl_root_clk", "usb_core_ref", base + 0x44d0, 0);
+ clks[IMX8MM_CLK_GPU3D_ROOT] = imx_clk_gate4("gpu3d_root_clk", "gpu3d_div", base + 0x44f0, 0);
+ clks[IMX8MM_CLK_USDHC1_ROOT] = imx_clk_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
+ clks[IMX8MM_CLK_USDHC2_ROOT] = imx_clk_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
+ clks[IMX8MM_CLK_WDOG1_ROOT] = imx_clk_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
+ clks[IMX8MM_CLK_WDOG2_ROOT] = imx_clk_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
+ clks[IMX8MM_CLK_WDOG3_ROOT] = imx_clk_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
+ clks[IMX8MM_CLK_VPU_G1_ROOT] = imx_clk_gate4("vpu_g1_root_clk", "vpu_g1", base + 0x4560, 0);
+ clks[IMX8MM_CLK_GPU_BUS_ROOT] = imx_clk_gate4("gpu_root_clk", "gpu_axi", base + 0x4570, 0);
+ clks[IMX8MM_CLK_VPU_H1_ROOT] = imx_clk_gate4("vpu_h1_root_clk", "vpu_h1", base + 0x4590, 0);
+ clks[IMX8MM_CLK_VPU_G2_ROOT] = imx_clk_gate4("vpu_g2_root_clk", "vpu_g2", base + 0x45a0, 0);
+ clks[IMX8MM_CLK_PDM_ROOT] = imx_clk_gate2_shared2("pdm_root_clk", "pdm", base + 0x45b0, 0, &share_count_pdm);
+ clks[IMX8MM_CLK_PDM_IPG] = imx_clk_gate2_shared2("pdm_ipg_clk", "ipg_audio_root", base + 0x45b0, 0, &share_count_pdm);
+ clks[IMX8MM_CLK_DISP_ROOT] = imx_clk_gate2_shared2("disp_root_clk", "disp_dc8000", base + 0x45d0, 0, &share_count_dcss);
+ clks[IMX8MM_CLK_DISP_AXI_ROOT] = imx_clk_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_dcss);
+ clks[IMX8MM_CLK_DISP_APB_ROOT] = imx_clk_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_dcss);
+ clks[IMX8MM_CLK_DISP_RTRM_ROOT] = imx_clk_gate2_shared2("disp_rtrm_root_clk", "disp_rtrm", base + 0x45d0, 0, &share_count_dcss);
+ clks[IMX8MM_CLK_USDHC3_ROOT] = imx_clk_gate4("usdhc3_root_clk", "usdhc3", base + 0x45e0, 0);
+ clks[IMX8MM_CLK_TMU_ROOT] = imx_clk_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0);
+ clks[IMX8MM_CLK_VPU_DEC_ROOT] = imx_clk_gate4("vpu_dec_root_clk", "vpu_bus", base + 0x4630, 0);
+ clks[IMX8MM_CLK_SDMA1_ROOT] = imx_clk_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
+ clks[IMX8MM_CLK_SDMA2_ROOT] = imx_clk_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
+ clks[IMX8MM_CLK_SDMA3_ROOT] = imx_clk_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0);
+ clks[IMX8MM_CLK_GPU2D_ROOT] = imx_clk_gate4("gpu2d_root_clk", "gpu2d_div", base + 0x4660, 0);
+ clks[IMX8MM_CLK_CSI1_ROOT] = imx_clk_gate4("csi1_root_clk", "csi1_core", base + 0x4650, 0);
+
+ clks[IMX8MM_CLK_GPT_3M] = imx_clk_fixed_factor("gpt_3m", "osc_24m", 1, 8);
+
+ clks[IMX8MM_CLK_DRAM_ALT_ROOT] = imx_clk_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
+ clks[IMX8MM_CLK_DRAM_CORE] = imx_clk_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mm_dram_core_sels, ARRAY_SIZE(imx8mm_dram_core_sels), CLK_IS_CRITICAL);
+
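+ /* Virtual "arm" clock: during a rate change it temporarily reparents
+ the A53 mux to the 24 MHz step clock while the ARM PLL is
+ reprogrammed, then switches back to the PLL output. */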
+ clks[IMX8MM_CLK_ARM] = imx_clk_cpu("arm", "arm_a53_div",
+ clks[IMX8MM_CLK_A53_DIV],
+ clks[IMX8MM_CLK_A53_SRC],
+ clks[IMX8MM_ARM_PLL_OUT],
+ clks[IMX8MM_CLK_24M]);
+
+ imx_check_clocks(clks, ARRAY_SIZE(clks));
+
+ clk_data.clks = clks;
+ clk_data.clk_num = ARRAY_SIZE(clks);
+ ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ if (ret < 0) {
+ pr_err("failed to register clks for i.MX8MM\n");
+ return -EINVAL;
+ }
+
+ imx_register_uart_clocks(uart_clks);
+
+ return 0;
+}
+CLK_OF_DECLARE_DRIVER(imx8mm, "fsl,imx8mm-ccm", imx8mm_clocks_init);
diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
index 26b57f43ccc3..a9b3888aef0c 100644
--- a/drivers/clk/imx/clk-imx8mq.c
+++ b/drivers/clk/imx/clk-imx8mq.c
@@ -26,246 +26,246 @@ static u32 share_count_nand;
static struct clk *clks[IMX8MQ_CLK_END];
-static const char *pll_ref_sels[] = { "osc_25m", "osc_27m", "dummy", "dummy", };
-static const char *arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", };
-static const char *gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", };
-static const char *vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", };
-static const char *audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", };
-static const char *audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", };
-static const char *video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", };
-
-static const char *sys1_pll1_out_sels[] = {"sys1_pll1", "sys1_pll1_ref_sel", };
-static const char *sys2_pll1_out_sels[] = {"sys2_pll1", "sys1_pll1_ref_sel", };
-static const char *sys3_pll1_out_sels[] = {"sys3_pll1", "sys3_pll1_ref_sel", };
-static const char *dram_pll1_out_sels[] = {"dram_pll1", "dram_pll1_ref_sel", };
-
-static const char *sys1_pll2_out_sels[] = {"sys1_pll2_div", "sys1_pll1_ref_sel", };
-static const char *sys2_pll2_out_sels[] = {"sys2_pll2_div", "sys2_pll1_ref_sel", };
-static const char *sys3_pll2_out_sels[] = {"sys3_pll2_div", "sys2_pll1_ref_sel", };
-static const char *dram_pll2_out_sels[] = {"dram_pll2_div", "dram_pll1_ref_sel", };
+static const char * const pll_ref_sels[] = { "osc_25m", "osc_27m", "dummy", "dummy", };
+static const char * const arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", };
+static const char * const gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", };
+static const char * const vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", };
+static const char * const audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", };
+static const char * const audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", };
+static const char * const video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", };
+
+static const char * const sys1_pll_out_sels[] = {"sys1_pll1_ref_sel", };
+static const char * const sys2_pll_out_sels[] = {"sys1_pll1_ref_sel", "sys2_pll1_ref_sel", };
+static const char * const sys3_pll_out_sels[] = {"sys3_pll1_ref_sel", "sys2_pll1_ref_sel", };
+static const char * const dram_pll_out_sels[] = {"dram_pll1_ref_sel", };
/* CCM ROOT */
-static const char *imx8mq_a53_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m",
+static const char * const imx8mq_a53_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m",
"sys1_pll_800m", "sys1_pll_400m", "audio_pll1_out", "sys3_pll2_out", };
-static const char *imx8mq_vpu_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m",
+static const char * const imx8mq_arm_m4_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_250m", "sys1_pll_266m",
+ "sys1_pll_800m", "audio_pll1_out", "video_pll1_out", "sys3_pll2_out", };
+
+static const char * const imx8mq_vpu_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m",
"sys1_pll_800m", "sys1_pll_400m", "audio_pll1_out", "vpu_pll_out", };
-static const char *imx8mq_gpu_core_sels[] = {"osc_25m", "gpu_pll_out", "sys1_pll_800m", "sys3_pll2_out",
+static const char * const imx8mq_gpu_core_sels[] = {"osc_25m", "gpu_pll_out", "sys1_pll_800m", "sys3_pll2_out",
"sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
-static const char *imx8mq_gpu_shader_sels[] = {"osc_25m", "gpu_pll_out", "sys1_pll_800m", "sys3_pll2_out",
+static const char * const imx8mq_gpu_shader_sels[] = {"osc_25m", "gpu_pll_out", "sys1_pll_800m", "sys3_pll2_out",
"sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
-static const char *imx8mq_main_axi_sels[] = {"osc_25m", "sys2_pll_333m", "sys1_pll_800m", "sys2_pll_250m",
+static const char * const imx8mq_main_axi_sels[] = {"osc_25m", "sys2_pll_333m", "sys1_pll_800m", "sys2_pll_250m",
"sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "sys1_pll_100m",};
-static const char *imx8mq_enet_axi_sels[] = {"osc_25m", "sys1_pll_266m", "sys1_pll_800m", "sys2_pll_250m",
+static const char * const imx8mq_enet_axi_sels[] = {"osc_25m", "sys1_pll_266m", "sys1_pll_800m", "sys2_pll_250m",
"sys2_pll_200m", "audio_pll1_out", "video_pll1_out", "sys3_pll2_out", };
-static const char *imx8mq_nand_usdhc_sels[] = {"osc_25m", "sys1_pll_266m", "sys1_pll_800m", "sys2_pll_200m",
+static const char * const imx8mq_nand_usdhc_sels[] = {"osc_25m", "sys1_pll_266m", "sys1_pll_800m", "sys2_pll_200m",
"sys1_pll_133m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll1_out", };
-static const char *imx8mq_vpu_bus_sels[] = {"osc_25m", "sys1_pll_800m", "vpu_pll_out", "audio_pll2_out", "sys3_pll2_out", "sys2_pll_1000m", "sys2_pll_200m", "sys1_pll_100m", };
+static const char * const imx8mq_vpu_bus_sels[] = {"osc_25m", "sys1_pll_800m", "vpu_pll_out", "audio_pll2_out", "sys3_pll2_out", "sys2_pll_1000m", "sys2_pll_200m", "sys1_pll_100m", };
-static const char *imx8mq_disp_axi_sels[] = {"osc_25m", "sys2_pll_125m", "sys1_pll_800m", "sys3_pll2_out", "sys1_pll_400m", "audio_pll2_out", "clk_ext1", "clk_ext4", };
+static const char * const imx8mq_disp_axi_sels[] = {"osc_25m", "sys2_pll_125m", "sys1_pll_800m", "sys3_pll2_out", "sys1_pll_400m", "audio_pll2_out", "clk_ext1", "clk_ext4", };
-static const char *imx8mq_disp_apb_sels[] = {"osc_25m", "sys2_pll_125m", "sys1_pll_800m", "sys3_pll2_out",
+static const char * const imx8mq_disp_apb_sels[] = {"osc_25m", "sys2_pll_125m", "sys1_pll_800m", "sys3_pll2_out",
"sys1_pll_40m", "audio_pll2_out", "clk_ext1", "clk_ext3", };
-static const char *imx8mq_disp_rtrm_sels[] = {"osc_25m", "sys1_pll_800m", "sys2_pll_200m", "sys1_pll_400m",
+static const char * const imx8mq_disp_rtrm_sels[] = {"osc_25m", "sys1_pll_800m", "sys2_pll_200m", "sys1_pll_400m",
"audio_pll1_out", "video_pll1_out", "clk_ext2", "clk_ext3", };
-static const char *imx8mq_usb_bus_sels[] = {"osc_25m", "sys2_pll_500m", "sys1_pll_800m", "sys2_pll_100m",
+static const char * const imx8mq_usb_bus_sels[] = {"osc_25m", "sys2_pll_500m", "sys1_pll_800m", "sys2_pll_100m",
"sys2_pll_200m", "clk_ext2", "clk_ext4", "audio_pll2_out", };
-static const char *imx8mq_gpu_axi_sels[] = {"osc_25m", "sys1_pll_800m", "gpu_pll_out", "sys3_pll2_out", "sys2_pll_1000m",
+static const char * const imx8mq_gpu_axi_sels[] = {"osc_25m", "sys1_pll_800m", "gpu_pll_out", "sys3_pll2_out", "sys2_pll_1000m",
"audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
-static const char *imx8mq_gpu_ahb_sels[] = {"osc_25m", "sys1_pll_800m", "gpu_pll_out", "sys3_pll2_out", "sys2_pll_1000m",
+static const char * const imx8mq_gpu_ahb_sels[] = {"osc_25m", "sys1_pll_800m", "gpu_pll_out", "sys3_pll2_out", "sys2_pll_1000m",
"audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
-static const char *imx8mq_noc_sels[] = {"osc_25m", "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_1000m", "sys2_pll_500m",
+static const char * const imx8mq_noc_sels[] = {"osc_25m", "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_1000m", "sys2_pll_500m",
"audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
-static const char *imx8mq_noc_apb_sels[] = {"osc_25m", "sys1_pll_400m", "sys3_pll2_out", "sys2_pll_333m", "sys2_pll_200m",
+static const char * const imx8mq_noc_apb_sels[] = {"osc_25m", "sys1_pll_400m", "sys3_pll2_out", "sys2_pll_333m", "sys2_pll_200m",
"sys1_pll_800m", "audio_pll1_out", "video_pll1_out", };
-static const char *imx8mq_ahb_sels[] = {"osc_25m", "sys1_pll_133m", "sys1_pll_800m", "sys1_pll_400m",
+static const char * const imx8mq_ahb_sels[] = {"osc_25m", "sys1_pll_133m", "sys1_pll_800m", "sys1_pll_400m",
"sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", "video_pll1_out", };
-static const char *imx8mq_audio_ahb_sels[] = {"osc_25m", "sys2_pll_500m", "sys1_pll_800m", "sys2_pll_1000m",
+static const char * const imx8mq_audio_ahb_sels[] = {"osc_25m", "sys2_pll_500m", "sys1_pll_800m", "sys2_pll_1000m",
"sys2_pll_166m", "sys3_pll2_out", "audio_pll1_out", "video_pll1_out", };
-static const char *imx8mq_dsi_ahb_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m",
+static const char * const imx8mq_dsi_ahb_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m",
"sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out"};
-static const char *imx8mq_dram_alt_sels[] = {"osc_25m", "sys1_pll_800m", "sys1_pll_100m", "sys2_pll_500m",
+static const char * const imx8mq_dram_alt_sels[] = {"osc_25m", "sys1_pll_800m", "sys1_pll_100m", "sys2_pll_500m",
"sys2_pll_250m", "sys1_pll_400m", "audio_pll1_out", "sys1_pll_266m", };
-static const char *imx8mq_dram_apb_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m",
+static const char * const imx8mq_dram_apb_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m",
"sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", };
-static const char *imx8mq_vpu_g1_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_100m", "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", };
+static const char * const imx8mq_vpu_g1_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_100m", "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", };
-static const char *imx8mq_vpu_g2_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_100m", "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", };
+static const char * const imx8mq_vpu_g2_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_100m", "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", };
-static const char *imx8mq_disp_dtrc_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_160m", "sys2_pll_100m", "sys3_pll2_out", "audio_pll2_out", };
+static const char * const imx8mq_disp_dtrc_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_160m", "sys2_pll_100m", "sys3_pll2_out", "audio_pll2_out", };
-static const char *imx8mq_disp_dc8000_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_160m", "sys2_pll_100m", "sys3_pll2_out", "audio_pll2_out", };
+static const char * const imx8mq_disp_dc8000_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_160m", "sys2_pll_100m", "sys3_pll2_out", "audio_pll2_out", };
-static const char *imx8mq_pcie1_ctrl_sels[] = {"osc_25m", "sys2_pll_250m", "sys2_pll_200m", "sys1_pll_266m",
+static const char * const imx8mq_pcie1_ctrl_sels[] = {"osc_25m", "sys2_pll_250m", "sys2_pll_200m", "sys1_pll_266m",
"sys1_pll_800m", "sys2_pll_500m", "sys2_pll_250m", "sys3_pll2_out", };
-static const char *imx8mq_pcie1_phy_sels[] = {"osc_25m", "sys2_pll_100m", "sys2_pll_500m", "clk_ext1", "clk_ext2",
+static const char * const imx8mq_pcie1_phy_sels[] = {"osc_25m", "sys2_pll_100m", "sys2_pll_500m", "clk_ext1", "clk_ext2",
"clk_ext3", "clk_ext4", };
-static const char *imx8mq_pcie1_aux_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_500m", "sys3_pll2_out",
+static const char * const imx8mq_pcie1_aux_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_500m", "sys3_pll2_out",
"sys2_pll_100m", "sys1_pll_80m", "sys1_pll_160m", "sys1_pll_200m", };
-static const char *imx8mq_dc_pixel_sels[] = {"osc_25m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys1_pll_800m", "sys2_pll_1000m", "sys3_pll2_out", "clk_ext4", };
+static const char * const imx8mq_dc_pixel_sels[] = {"osc_25m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys1_pll_800m", "sys2_pll_1000m", "sys3_pll2_out", "clk_ext4", };
-static const char *imx8mq_lcdif_pixel_sels[] = {"osc_25m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys1_pll_800m", "sys2_pll_1000m", "sys3_pll2_out", "clk_ext4", };
+static const char * const imx8mq_lcdif_pixel_sels[] = {"osc_25m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys1_pll_800m", "sys2_pll_1000m", "sys3_pll2_out", "clk_ext4", };
-static const char *imx8mq_sai1_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext1", "clk_ext2", };
+static const char * const imx8mq_sai1_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext1", "clk_ext2", };
-static const char *imx8mq_sai2_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", };
+static const char * const imx8mq_sai2_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", };
-static const char *imx8mq_sai3_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", };
+static const char * const imx8mq_sai3_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", };
-static const char *imx8mq_sai4_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext1", "clk_ext2", };
+static const char * const imx8mq_sai4_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext1", "clk_ext2", };
-static const char *imx8mq_sai5_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", };
+static const char * const imx8mq_sai5_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", };
-static const char *imx8mq_sai6_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", };
+static const char * const imx8mq_sai6_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", };
-static const char *imx8mq_spdif1_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", };
+static const char * const imx8mq_spdif1_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", };
-static const char *imx8mq_spdif2_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", };
+static const char * const imx8mq_spdif2_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", };
-static const char *imx8mq_enet_ref_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_500m", "sys2_pll_100m",
+static const char * const imx8mq_enet_ref_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_500m", "sys2_pll_100m",
"sys1_pll_160m", "audio_pll1_out", "video_pll1_out", "clk_ext4", };
-static const char *imx8mq_enet_timer_sels[] = {"osc_25m", "sys2_pll_100m", "audio_pll1_out", "clk_ext1", "clk_ext2",
+static const char * const imx8mq_enet_timer_sels[] = {"osc_25m", "sys2_pll_100m", "audio_pll1_out", "clk_ext1", "clk_ext2",
"clk_ext3", "clk_ext4", "video_pll1_out", };
-static const char *imx8mq_enet_phy_sels[] = {"osc_25m", "sys2_pll_50m", "sys2_pll_125m", "sys2_pll_500m",
+static const char * const imx8mq_enet_phy_sels[] = {"osc_25m", "sys2_pll_50m", "sys2_pll_125m", "sys2_pll_500m",
"audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
-static const char *imx8mq_nand_sels[] = {"osc_25m", "sys2_pll_500m", "audio_pll1_out", "sys1_pll_400m",
+static const char * const imx8mq_nand_sels[] = {"osc_25m", "sys2_pll_500m", "audio_pll1_out", "sys1_pll_400m",
"audio_pll2_out", "sys3_pll2_out", "sys2_pll_250m", "video_pll1_out", };
-static const char *imx8mq_qspi_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
+static const char * const imx8mq_qspi_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
"audio_pll2_out", "sys1_pll_266m", "sys3_pll2_out", "sys1_pll_100m", };
-static const char *imx8mq_usdhc1_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
+static const char * const imx8mq_usdhc1_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
"audio_pll2_out", "sys1_pll_266m", "sys3_pll2_out", "sys1_pll_100m", };
-static const char *imx8mq_usdhc2_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
+static const char * const imx8mq_usdhc2_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
"audio_pll2_out", "sys1_pll_266m", "sys3_pll2_out", "sys1_pll_100m", };
-static const char *imx8mq_i2c1_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out",
+static const char * const imx8mq_i2c1_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out",
"video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };
-static const char *imx8mq_i2c2_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out",
+static const char * const imx8mq_i2c2_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out",
"video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };
-static const char *imx8mq_i2c3_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out",
+static const char * const imx8mq_i2c3_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out",
"video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };
-static const char *imx8mq_i2c4_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out",
+static const char * const imx8mq_i2c4_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out",
"video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };
-static const char *imx8mq_uart1_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m",
+static const char * const imx8mq_uart1_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m",
"sys3_pll2_out", "clk_ext2", "clk_ext4", "audio_pll2_out", };
-static const char *imx8mq_uart2_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m",
+static const char * const imx8mq_uart2_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m",
"sys3_pll2_out", "clk_ext2", "clk_ext3", "audio_pll2_out", };
-static const char *imx8mq_uart3_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m",
+static const char * const imx8mq_uart3_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m",
"sys3_pll2_out", "clk_ext2", "clk_ext4", "audio_pll2_out", };
-static const char *imx8mq_uart4_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m",
+static const char * const imx8mq_uart4_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m",
"sys3_pll2_out", "clk_ext2", "clk_ext3", "audio_pll2_out", };
-static const char *imx8mq_usb_core_sels[] = {"osc_25m", "sys1_pll_100m", "sys1_pll_40m", "sys2_pll_100m",
+static const char * const imx8mq_usb_core_sels[] = {"osc_25m", "sys1_pll_100m", "sys1_pll_40m", "sys2_pll_100m",
"sys2_pll_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", };
-static const char *imx8mq_usb_phy_sels[] = {"osc_25m", "sys1_pll_100m", "sys1_pll_40m", "sys2_pll_100m",
+static const char * const imx8mq_usb_phy_sels[] = {"osc_25m", "sys1_pll_100m", "sys1_pll_40m", "sys2_pll_100m",
"sys2_pll_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", };
-static const char *imx8mq_ecspi1_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m",
+static const char * const imx8mq_ecspi1_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m",
"sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", };
-static const char *imx8mq_ecspi2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m",
+static const char * const imx8mq_ecspi2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m",
"sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", };
-static const char *imx8mq_pwm1_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m",
+static const char * const imx8mq_pwm1_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m",
"sys3_pll2_out", "clk_ext1", "sys1_pll_80m", "video_pll1_out", };
-static const char *imx8mq_pwm2_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m",
+static const char * const imx8mq_pwm2_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m",
"sys3_pll2_out", "clk_ext1", "sys1_pll_80m", "video_pll1_out", };
-static const char *imx8mq_pwm3_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m",
+static const char * const imx8mq_pwm3_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m",
"sys3_pll2_out", "clk_ext2", "sys1_pll_80m", "video_pll1_out", };
-static const char *imx8mq_pwm4_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m",
+static const char * const imx8mq_pwm4_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m",
"sys3_pll2_out", "clk_ext2", "sys1_pll_80m", "video_pll1_out", };
-static const char *imx8mq_gpt1_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_400m", "sys1_pll_40m",
+static const char * const imx8mq_gpt1_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_400m", "sys1_pll_40m",
"sys1_pll_80m", "audio_pll1_out", "clk_ext1", };
-static const char *imx8mq_wdog_sels[] = {"osc_25m", "sys1_pll_133m", "sys1_pll_160m", "vpu_pll_out",
+static const char * const imx8mq_wdog_sels[] = {"osc_25m", "sys1_pll_133m", "sys1_pll_160m", "vpu_pll_out",
"sys2_pll_125m", "sys3_pll2_out", "sys1_pll_80m", "sys2_pll_166m", };
-static const char *imx8mq_wrclk_sels[] = {"osc_25m", "sys1_pll_40m", "vpu_pll_out", "sys3_pll2_out", "sys2_pll_200m",
+static const char * const imx8mq_wrclk_sels[] = {"osc_25m", "sys1_pll_40m", "vpu_pll_out", "sys3_pll2_out", "sys2_pll_200m",
"sys1_pll_266m", "sys2_pll_500m", "sys1_pll_100m", };
-static const char *imx8mq_dsi_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m",
+static const char * const imx8mq_dsi_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m",
"sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", };
-static const char *imx8mq_dsi_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m",
+static const char * const imx8mq_dsi_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m",
"sys2_pll_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", };
-static const char *imx8mq_dsi_dbi_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_100m", "sys1_pll_800m",
+static const char * const imx8mq_dsi_dbi_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_100m", "sys1_pll_800m",
"sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", };
-static const char *imx8mq_dsi_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m",
+static const char * const imx8mq_dsi_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m",
"sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out", };
-static const char *imx8mq_csi1_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m",
+static const char * const imx8mq_csi1_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m",
"sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", };
-static const char *imx8mq_csi1_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m",
+static const char * const imx8mq_csi1_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m",
"sys2_pll_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", };
-static const char *imx8mq_csi1_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m",
+static const char * const imx8mq_csi1_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m",
"sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out", };
-static const char *imx8mq_csi2_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m",
+static const char * const imx8mq_csi2_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m",
"sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", };
-static const char *imx8mq_csi2_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m",
+static const char * const imx8mq_csi2_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m",
"sys2_pll_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", };
-static const char *imx8mq_csi2_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m",
+static const char * const imx8mq_csi2_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m",
"sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out", };
-static const char *imx8mq_pcie2_ctrl_sels[] = {"osc_25m", "sys2_pll_250m", "sys2_pll_200m", "sys1_pll_266m",
+static const char * const imx8mq_pcie2_ctrl_sels[] = {"osc_25m", "sys2_pll_250m", "sys2_pll_200m", "sys1_pll_266m",
"sys1_pll_800m", "sys2_pll_500m", "sys2_pll_333m", "sys3_pll2_out", };
-static const char *imx8mq_pcie2_phy_sels[] = {"osc_25m", "sys2_pll_100m", "sys2_pll_500m", "clk_ext1",
+static const char * const imx8mq_pcie2_phy_sels[] = {"osc_25m", "sys2_pll_100m", "sys2_pll_500m", "clk_ext1",
"clk_ext2", "clk_ext3", "clk_ext4", "sys1_pll_400m", };
-static const char *imx8mq_pcie2_aux_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_50m", "sys3_pll2_out",
+static const char * const imx8mq_pcie2_aux_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_50m", "sys3_pll2_out",
"sys2_pll_100m", "sys1_pll_80m", "sys1_pll_160m", "sys1_pll_200m", };
-static const char *imx8mq_ecspi3_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m",
+static const char * const imx8mq_ecspi3_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m",
"sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", };
-static const char *imx8mq_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", };
+static const char * const imx8mq_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", };
-static const char *imx8mq_clko2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_400m", "sys2_pll_166m", "audio_pll1_out",
- "video_pll1_out", "ckil", };
+static const char * const imx8mq_clko1_sels[] = {"osc_25m", "sys1_pll_800m", "osc_27m", "sys1_pll_200m",
+ "audio_pll2_out", "sys2_pll_500m", "vpu_pll_out", "sys1_pll_80m", };
+static const char * const imx8mq_clko2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_400m", "sys2_pll_166m",
+ "sys3_pll2_out", "audio_pll1_out", "video_pll1_out", "ckil", };
static struct clk_onecell_data clk_data;
@@ -308,10 +308,6 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
clks[IMX8MQ_AUDIO_PLL1_REF_DIV] = imx_clk_divider("audio_pll1_ref_div", "audio_pll1_ref_sel", base + 0x0, 5, 6);
clks[IMX8MQ_AUDIO_PLL2_REF_DIV] = imx_clk_divider("audio_pll2_ref_div", "audio_pll2_ref_sel", base + 0x8, 5, 6);
clks[IMX8MQ_VIDEO_PLL1_REF_DIV] = imx_clk_divider("video_pll1_ref_div", "video_pll1_ref_sel", base + 0x10, 5, 6);
- clks[IMX8MQ_SYS1_PLL1_REF_DIV] = imx_clk_divider("sys1_pll1_ref_div", "sys1_pll1_ref_sel", base + 0x38, 25, 3);
- clks[IMX8MQ_SYS2_PLL1_REF_DIV] = imx_clk_divider("sys2_pll1_ref_div", "sys2_pll1_ref_sel", base + 0x44, 25, 3);
- clks[IMX8MQ_SYS3_PLL1_REF_DIV] = imx_clk_divider("sys3_pll1_ref_div", "sys3_pll1_ref_sel", base + 0x50, 25, 3);
- clks[IMX8MQ_DRAM_PLL1_REF_DIV] = imx_clk_divider("dram_pll1_ref_div", "dram_pll1_ref_sel", base + 0x68, 25, 3);
clks[IMX8MQ_ARM_PLL] = imx_clk_frac_pll("arm_pll", "arm_pll_ref_div", base + 0x28);
clks[IMX8MQ_GPU_PLL] = imx_clk_frac_pll("gpu_pll", "gpu_pll_ref_div", base + 0x18);
@@ -319,43 +315,15 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
clks[IMX8MQ_AUDIO_PLL1] = imx_clk_frac_pll("audio_pll1", "audio_pll1_ref_div", base + 0x0);
clks[IMX8MQ_AUDIO_PLL2] = imx_clk_frac_pll("audio_pll2", "audio_pll2_ref_div", base + 0x8);
clks[IMX8MQ_VIDEO_PLL1] = imx_clk_frac_pll("video_pll1", "video_pll1_ref_div", base + 0x10);
- clks[IMX8MQ_SYS1_PLL1] = imx_clk_sccg_pll("sys1_pll1", "sys1_pll1_ref_div", base + 0x30, SCCG_PLL1);
- clks[IMX8MQ_SYS2_PLL1] = imx_clk_sccg_pll("sys2_pll1", "sys2_pll1_ref_div", base + 0x3c, SCCG_PLL1);
- clks[IMX8MQ_SYS3_PLL1] = imx_clk_sccg_pll("sys3_pll1", "sys3_pll1_ref_div", base + 0x48, SCCG_PLL1);
- clks[IMX8MQ_DRAM_PLL1] = imx_clk_sccg_pll("dram_pll1", "dram_pll1_ref_div", base + 0x60, SCCG_PLL1);
-
- clks[IMX8MQ_SYS1_PLL2] = imx_clk_sccg_pll("sys1_pll2", "sys1_pll1_out_div", base + 0x30, SCCG_PLL2);
- clks[IMX8MQ_SYS2_PLL2] = imx_clk_sccg_pll("sys2_pll2", "sys2_pll1_out_div", base + 0x3c, SCCG_PLL2);
- clks[IMX8MQ_SYS3_PLL2] = imx_clk_sccg_pll("sys3_pll2", "sys3_pll1_out_div", base + 0x48, SCCG_PLL2);
- clks[IMX8MQ_DRAM_PLL2] = imx_clk_sccg_pll("dram_pll2", "dram_pll1_out_div", base + 0x60, SCCG_PLL2);
-
- /* PLL divs */
- clks[IMX8MQ_SYS1_PLL1_OUT_DIV] = imx_clk_divider("sys1_pll1_out_div", "sys1_pll1_out", base + 0x38, 19, 6);
- clks[IMX8MQ_SYS2_PLL1_OUT_DIV] = imx_clk_divider("sys2_pll1_out_div", "sys2_pll1_out", base + 0x44, 19, 6);
- clks[IMX8MQ_SYS3_PLL1_OUT_DIV] = imx_clk_divider("sys3_pll1_out_div", "sys3_pll1_out", base + 0x50, 19, 6);
- clks[IMX8MQ_DRAM_PLL1_OUT_DIV] = imx_clk_divider("dram_pll1_out_div", "dram_pll1_out", base + 0x68, 19, 6);
- clks[IMX8MQ_SYS1_PLL2_DIV] = imx_clk_divider("sys1_pll2_div", "sys1_pll2", base + 0x38, 1, 6);
- clks[IMX8MQ_SYS2_PLL2_DIV] = imx_clk_divider("sys2_pll2_div", "sys2_pll2", base + 0x44, 1, 6);
- clks[IMX8MQ_SYS3_PLL2_DIV] = imx_clk_divider("sys3_pll2_div", "sys3_pll2", base + 0x50, 1, 6);
- clks[IMX8MQ_DRAM_PLL2_DIV] = imx_clk_divider("dram_pll2_div", "dram_pll2", base + 0x68, 1, 6);
/* PLL bypass out */
- clks[IMX8MQ_ARM_PLL_BYPASS] = imx_clk_mux("arm_pll_bypass", base + 0x28, 14, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels));
+ clks[IMX8MQ_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x28, 14, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
clks[IMX8MQ_GPU_PLL_BYPASS] = imx_clk_mux("gpu_pll_bypass", base + 0x18, 14, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels));
clks[IMX8MQ_VPU_PLL_BYPASS] = imx_clk_mux("vpu_pll_bypass", base + 0x20, 14, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels));
clks[IMX8MQ_AUDIO_PLL1_BYPASS] = imx_clk_mux("audio_pll1_bypass", base + 0x0, 14, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels));
clks[IMX8MQ_AUDIO_PLL2_BYPASS] = imx_clk_mux("audio_pll2_bypass", base + 0x8, 14, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels));
clks[IMX8MQ_VIDEO_PLL1_BYPASS] = imx_clk_mux("video_pll1_bypass", base + 0x10, 14, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels));
- clks[IMX8MQ_SYS1_PLL1_OUT] = imx_clk_mux("sys1_pll1_out", base + 0x30, 5, 1, sys1_pll1_out_sels, ARRAY_SIZE(sys1_pll1_out_sels));
- clks[IMX8MQ_SYS2_PLL1_OUT] = imx_clk_mux("sys2_pll1_out", base + 0x3c, 5, 1, sys2_pll1_out_sels, ARRAY_SIZE(sys2_pll1_out_sels));
- clks[IMX8MQ_SYS3_PLL1_OUT] = imx_clk_mux("sys3_pll1_out", base + 0x48, 5, 1, sys3_pll1_out_sels, ARRAY_SIZE(sys3_pll1_out_sels));
- clks[IMX8MQ_DRAM_PLL1_OUT] = imx_clk_mux("dram_pll1_out", base + 0x60, 5, 1, dram_pll1_out_sels, ARRAY_SIZE(dram_pll1_out_sels));
- clks[IMX8MQ_SYS1_PLL2_OUT] = imx_clk_mux("sys1_pll2_out", base + 0x30, 4, 1, sys1_pll2_out_sels, ARRAY_SIZE(sys1_pll2_out_sels));
- clks[IMX8MQ_SYS2_PLL2_OUT] = imx_clk_mux("sys2_pll2_out", base + 0x3c, 4, 1, sys2_pll2_out_sels, ARRAY_SIZE(sys2_pll2_out_sels));
- clks[IMX8MQ_SYS3_PLL2_OUT] = imx_clk_mux("sys3_pll2_out", base + 0x48, 4, 1, sys3_pll2_out_sels, ARRAY_SIZE(sys3_pll2_out_sels));
- clks[IMX8MQ_DRAM_PLL2_OUT] = imx_clk_mux("dram_pll2_out", base + 0x60, 4, 1, dram_pll2_out_sels, ARRAY_SIZE(dram_pll2_out_sels));
-
/* PLL OUT GATE */
clks[IMX8MQ_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x28, 21);
clks[IMX8MQ_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x18, 21);
@@ -363,11 +331,11 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
clks[IMX8MQ_AUDIO_PLL1_OUT] = imx_clk_gate("audio_pll1_out", "audio_pll1_bypass", base + 0x0, 21);
clks[IMX8MQ_AUDIO_PLL2_OUT] = imx_clk_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x8, 21);
clks[IMX8MQ_VIDEO_PLL1_OUT] = imx_clk_gate("video_pll1_out", "video_pll1_bypass", base + 0x10, 21);
- clks[IMX8MQ_SYS1_PLL_OUT] = imx_clk_gate("sys1_pll_out", "sys1_pll2_out", base + 0x30, 9);
- clks[IMX8MQ_SYS2_PLL_OUT] = imx_clk_gate("sys2_pll_out", "sys2_pll2_out", base + 0x3c, 9);
- clks[IMX8MQ_SYS3_PLL_OUT] = imx_clk_gate("sys3_pll_out", "sys3_pll2_out", base + 0x48, 9);
- clks[IMX8MQ_DRAM_PLL_OUT] = imx_clk_gate("dram_pll_out", "dram_pll2_out", base + 0x60, 9);
+ clks[IMX8MQ_SYS1_PLL_OUT] = imx_clk_sccg_pll("sys1_pll_out", sys1_pll_out_sels, ARRAY_SIZE(sys1_pll_out_sels), 0, 0, 0, base + 0x30, CLK_IS_CRITICAL);
+ clks[IMX8MQ_SYS2_PLL_OUT] = imx_clk_sccg_pll("sys2_pll_out", sys2_pll_out_sels, ARRAY_SIZE(sys2_pll_out_sels), 0, 0, 1, base + 0x3c, CLK_IS_CRITICAL);
+ clks[IMX8MQ_SYS3_PLL_OUT] = imx_clk_sccg_pll("sys3_pll_out", sys3_pll_out_sels, ARRAY_SIZE(sys3_pll_out_sels), 0, 0, 1, base + 0x48, CLK_IS_CRITICAL);
+ clks[IMX8MQ_DRAM_PLL_OUT] = imx_clk_sccg_pll("dram_pll_out", dram_pll_out_sels, ARRAY_SIZE(dram_pll_out_sels), 0, 0, 0, base + 0x60, CLK_IS_CRITICAL);
/* SYS PLL fixed output */
clks[IMX8MQ_SYS1_PLL_40M] = imx_clk_fixed_factor("sys1_pll_40m", "sys1_pll_out", 1, 20);
clks[IMX8MQ_SYS1_PLL_80M] = imx_clk_fixed_factor("sys1_pll_80m", "sys1_pll_out", 1, 10);
@@ -396,15 +364,19 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
/* CORE */
clks[IMX8MQ_CLK_A53_SRC] = imx_clk_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mq_a53_sels, ARRAY_SIZE(imx8mq_a53_sels));
+ clks[IMX8MQ_CLK_M4_SRC] = imx_clk_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mq_arm_m4_sels, ARRAY_SIZE(imx8mq_arm_m4_sels));
clks[IMX8MQ_CLK_VPU_SRC] = imx_clk_mux2("vpu_src", base + 0x8100, 24, 3, imx8mq_vpu_sels, ARRAY_SIZE(imx8mq_vpu_sels));
clks[IMX8MQ_CLK_GPU_CORE_SRC] = imx_clk_mux2("gpu_core_src", base + 0x8180, 24, 3, imx8mq_gpu_core_sels, ARRAY_SIZE(imx8mq_gpu_core_sels));
clks[IMX8MQ_CLK_GPU_SHADER_SRC] = imx_clk_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mq_gpu_shader_sels, ARRAY_SIZE(imx8mq_gpu_shader_sels));
+
clks[IMX8MQ_CLK_A53_CG] = imx_clk_gate3_flags("arm_a53_cg", "arm_a53_src", base + 0x8000, 28, CLK_IS_CRITICAL);
+ clks[IMX8MQ_CLK_M4_CG] = imx_clk_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
clks[IMX8MQ_CLK_VPU_CG] = imx_clk_gate3("vpu_cg", "vpu_src", base + 0x8100, 28);
clks[IMX8MQ_CLK_GPU_CORE_CG] = imx_clk_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28);
clks[IMX8MQ_CLK_GPU_SHADER_CG] = imx_clk_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28);
clks[IMX8MQ_CLK_A53_DIV] = imx_clk_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
+ clks[IMX8MQ_CLK_M4_DIV] = imx_clk_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
clks[IMX8MQ_CLK_VPU_DIV] = imx_clk_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3);
clks[IMX8MQ_CLK_GPU_CORE_DIV] = imx_clk_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3);
clks[IMX8MQ_CLK_GPU_SHADER_DIV] = imx_clk_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3);
@@ -479,6 +451,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
clks[IMX8MQ_CLK_GPT1] = imx8m_clk_composite("gpt1", imx8mq_gpt1_sels, base + 0xb580);
clks[IMX8MQ_CLK_WDOG] = imx8m_clk_composite("wdog", imx8mq_wdog_sels, base + 0xb900);
clks[IMX8MQ_CLK_WRCLK] = imx8m_clk_composite("wrclk", imx8mq_wrclk_sels, base + 0xb980);
+ clks[IMX8MQ_CLK_CLKO1] = imx8m_clk_composite("clko1", imx8mq_clko1_sels, base + 0xba00);
clks[IMX8MQ_CLK_CLKO2] = imx8m_clk_composite("clko2", imx8mq_clko2_sels, base + 0xba80);
clks[IMX8MQ_CLK_DSI_CORE] = imx8m_clk_composite("dsi_core", imx8mq_dsi_core_sels, base + 0xbb00);
clks[IMX8MQ_CLK_DSI_PHY_REF] = imx8m_clk_composite("dsi_phy_ref", imx8mq_dsi_phy_sels, base + 0xbb80);
@@ -500,6 +473,11 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
clks[IMX8MQ_CLK_ECSPI2_ROOT] = imx_clk_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
clks[IMX8MQ_CLK_ECSPI3_ROOT] = imx_clk_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
clks[IMX8MQ_CLK_ENET1_ROOT] = imx_clk_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
+ clks[IMX8MQ_CLK_GPIO1_ROOT] = imx_clk_gate4("gpio1_root_clk", "ipg_root", base + 0x40b0, 0);
+ clks[IMX8MQ_CLK_GPIO2_ROOT] = imx_clk_gate4("gpio2_root_clk", "ipg_root", base + 0x40c0, 0);
+ clks[IMX8MQ_CLK_GPIO3_ROOT] = imx_clk_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0);
+ clks[IMX8MQ_CLK_GPIO4_ROOT] = imx_clk_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0);
+ clks[IMX8MQ_CLK_GPIO5_ROOT] = imx_clk_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0);
clks[IMX8MQ_CLK_GPT1_ROOT] = imx_clk_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0);
clks[IMX8MQ_CLK_I2C1_ROOT] = imx_clk_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
clks[IMX8MQ_CLK_I2C2_ROOT] = imx_clk_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
@@ -558,6 +536,12 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
clks[IMX8MQ_GPT_3M_CLK] = imx_clk_fixed_factor("gpt_3m", "osc_25m", 1, 8);
clks[IMX8MQ_CLK_DRAM_ALT_ROOT] = imx_clk_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
+ clks[IMX8MQ_CLK_ARM] = imx_clk_cpu("arm", "arm_a53_div",
+ clks[IMX8MQ_CLK_A53_DIV],
+ clks[IMX8MQ_CLK_A53_SRC],
+ clks[IMX8MQ_ARM_PLL_OUT],
+ clks[IMX8MQ_SYS1_PLL_800M]);
+
for (i = 0; i < IMX8MQ_CLK_END; i++)
if (IS_ERR(clks[i]))
pr_err("i.MX8mq clk %u register failed with %ld\n",
diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c
index 83e2ef96d81d..5e2903efc488 100644
--- a/drivers/clk/imx/clk-imx8qxp.c
+++ b/drivers/clk/imx/clk-imx8qxp.c
@@ -138,6 +138,7 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
}
static const struct of_device_id imx8qxp_match[] = {
+ { .compatible = "fsl,scu-clk", },
{ .compatible = "fsl,imx8qxp-clk", },
{ /* sentinel */ }
};
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
new file mode 100644
index 000000000000..1acfa3e3cfb4
--- /dev/null
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -0,0 +1,392 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2017-2018 NXP.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+
+#include "clk.h"
+
+#define GNRL_CTL 0x0
+#define DIV_CTL 0x4
+#define LOCK_STATUS BIT(31)
+#define LOCK_SEL_MASK BIT(29)
+#define CLKE_MASK BIT(11)
+#define RST_MASK BIT(9)
+#define BYPASS_MASK BIT(4)
+#define MDIV_SHIFT 12
+#define MDIV_MASK GENMASK(21, 12)
+#define PDIV_SHIFT 4
+#define PDIV_MASK GENMASK(9, 4)
+#define SDIV_SHIFT 0
+#define SDIV_MASK GENMASK(2, 0)
+#define KDIV_SHIFT 0
+#define KDIV_MASK GENMASK(15, 0)
+
+#define LOCK_TIMEOUT_US 10000
+
+struct clk_pll14xx {
+ struct clk_hw hw;
+ void __iomem *base;
+ enum imx_pll14xx_type type;
+ const struct imx_pll14xx_rate_table *rate_table;
+ int rate_count;
+};
+
+#define to_clk_pll14xx(_hw) container_of(_hw, struct clk_pll14xx, hw)
+
+static const struct imx_pll14xx_rate_table *imx_get_pll_settings(
+ struct clk_pll14xx *pll, unsigned long rate)
+{
+ const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
+ int i;
+
+ for (i = 0; i < pll->rate_count; i++)
+ if (rate == rate_table[i].rate)
+ return &rate_table[i];
+
+ return NULL;
+}
+
+static long clk_pll14xx_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_pll14xx *pll = to_clk_pll14xx(hw);
+ const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
+ int i;
+
+ /* Assuming rate_table is sorted in descending order */
+ for (i = 0; i < pll->rate_count; i++)
+ if (rate >= rate_table[i].rate)
+ return rate_table[i].rate;
+
+ /* return minimum supported value */
+ return rate_table[i - 1].rate;
+}
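Both helpers above only ever hand back rates that exist in the per-SoC table, so callers see the nearest supported rate rather than an arbitrary one. A minimal sketch of such a table, with purely illustrative values for a 24 MHz reference (not taken from any datasheet):

        /* hypothetical table, kept sorted in descending order as required */
        static const struct imx_pll14xx_rate_table example_1416x_tbl[] = {
                { .rate = 1000000000U, .mdiv = 250, .pdiv = 3, .sdiv = 1, .kdiv = 0 },
                { .rate =  800000000U, .mdiv = 300, .pdiv = 9, .sdiv = 0, .kdiv = 0 },
        };
        /* with this table, clk_pll14xx_round_rate() maps a 900 MHz request to 800 MHz */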
+
+static unsigned long clk_pll1416x_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pll14xx *pll = to_clk_pll14xx(hw);
+ u32 mdiv, pdiv, sdiv, pll_gnrl, pll_div;
+ u64 fvco = parent_rate;
+
+ pll_gnrl = readl_relaxed(pll->base);
+ pll_div = readl_relaxed(pll->base + 4);
+ mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT;
+ pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT;
+ sdiv = (pll_div & SDIV_MASK) >> SDIV_SHIFT;
+
+ fvco *= mdiv;
+ do_div(fvco, pdiv << sdiv);
+
+ return fvco;
+}
+
+static unsigned long clk_pll1443x_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pll14xx *pll = to_clk_pll14xx(hw);
+ u32 mdiv, pdiv, sdiv, pll_gnrl, pll_div_ctl0, pll_div_ctl1;
+ short int kdiv;
+ u64 fvco = parent_rate;
+
+ pll_gnrl = readl_relaxed(pll->base);
+ pll_div_ctl0 = readl_relaxed(pll->base + 4);
+ pll_div_ctl1 = readl_relaxed(pll->base + 8);
+ mdiv = (pll_div_ctl0 & MDIV_MASK) >> MDIV_SHIFT;
+ pdiv = (pll_div_ctl0 & PDIV_MASK) >> PDIV_SHIFT;
+ sdiv = (pll_div_ctl0 & SDIV_MASK) >> SDIV_SHIFT;
+ kdiv = pll_div_ctl1 & KDIV_MASK;
+
+ /* fvco = (m * 65536 + k) * Fin / (p * 65536) */
+ fvco *= (mdiv * 65536 + kdiv);
+ pdiv *= 65536;
+
+ do_div(fvco, pdiv << sdiv);
+
+ return fvco;
+}
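As a quick sanity check of the fractional formula above, with illustrative values Fin = 24 MHz, mdiv = 300, pdiv = 9, sdiv = 0 and kdiv = 0 (a non-zero kdiv only contributes the 1/65536 fractional term):

        uint64_t fin  = 24000000ULL;
        uint64_t fvco = fin * (300 * 65536 + 0);   /* mdiv * 65536 + kdiv */
        do_div(fvco, (9 * 65536) << 0);            /* pdiv * 65536 * 2^sdiv */
        /* fvco is now exactly 800000000 (800 MHz) */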
+
+static inline bool clk_pll1416x_mp_change(const struct imx_pll14xx_rate_table *rate,
+ u32 pll_div)
+{
+ u32 old_mdiv, old_pdiv;
+
+ old_mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT;
+ old_pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT;
+
+ return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv;
+}
+
+static inline bool clk_pll1443x_mpk_change(const struct imx_pll14xx_rate_table *rate,
+ u32 pll_div_ctl0, u32 pll_div_ctl1)
+{
+ u32 old_mdiv, old_pdiv, old_kdiv;
+
+ old_mdiv = (pll_div_ctl0 & MDIV_MASK) >> MDIV_SHIFT;
+ old_pdiv = (pll_div_ctl0 & PDIV_MASK) >> PDIV_SHIFT;
+ old_kdiv = (pll_div_ctl1 & KDIV_MASK) >> KDIV_SHIFT;
+
+ return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv ||
+ rate->kdiv != old_kdiv;
+}
+
+static inline bool clk_pll1443x_mp_change(const struct imx_pll14xx_rate_table *rate,
+ u32 pll_div_ctl0, u32 pll_div_ctl1)
+{
+ u32 old_mdiv, old_pdiv, old_kdiv;
+
+ old_mdiv = (pll_div_ctl0 & MDIV_MASK) >> MDIV_SHIFT;
+ old_pdiv = (pll_div_ctl0 & PDIV_MASK) >> PDIV_SHIFT;
+ old_kdiv = (pll_div_ctl1 & KDIV_MASK) >> KDIV_SHIFT;
+
+ return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv ||
+ rate->kdiv != old_kdiv;
+}
+
+static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll)
+{
+ u32 val;
+
+ return readl_poll_timeout(pll->base, val, val & LOCK_STATUS, 0,
+ LOCK_TIMEOUT_US);
+}
+
+static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct clk_pll14xx *pll = to_clk_pll14xx(hw);
+ const struct imx_pll14xx_rate_table *rate;
+ u32 tmp, div_val;
+ int ret;
+
+ rate = imx_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ tmp = readl_relaxed(pll->base + 4);
+
+ if (!clk_pll1416x_mp_change(rate, tmp)) {
+ tmp &= ~(SDIV_MASK) << SDIV_SHIFT;
+ tmp |= rate->sdiv << SDIV_SHIFT;
+ writel_relaxed(tmp, pll->base + 4);
+
+ return 0;
+ }
+
+ /* Bypass clock and set lock to pll output lock */
+ tmp = readl_relaxed(pll->base);
+ tmp |= LOCK_SEL_MASK;
+ writel_relaxed(tmp, pll->base);
+
+ /* Enable RST */
+ tmp &= ~RST_MASK;
+ writel_relaxed(tmp, pll->base);
+
+ div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) |
+ (rate->sdiv << SDIV_SHIFT);
+ writel_relaxed(div_val, pll->base + 0x4);
+
+ /*
+ * According to the spec, t3 - t2 needs to be greater than
+ * both 1us and 1/FREF.
+ * FREF is FIN / prediv; prediv ranges over [1, 63], so choose
+ * 3us.
+ */
+ udelay(3);
+
+ /* Disable RST */
+ tmp |= RST_MASK;
+ writel_relaxed(tmp, pll->base);
+
+ /* Wait Lock */
+ ret = clk_pll14xx_wait_lock(pll);
+ if (ret)
+ return ret;
+
+ /* Bypass */
+ tmp &= ~BYPASS_MASK;
+ writel_relaxed(tmp, pll->base);
+
+ return 0;
+}
+
+static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct clk_pll14xx *pll = to_clk_pll14xx(hw);
+ const struct imx_pll14xx_rate_table *rate;
+ u32 tmp, div_val;
+ int ret;
+
+ rate = imx_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ tmp = readl_relaxed(pll->base + 4);
+ div_val = readl_relaxed(pll->base + 8);
+
+ if (!clk_pll1443x_mpk_change(rate, tmp, div_val)) {
+ tmp &= ~(SDIV_MASK) << SDIV_SHIFT;
+ tmp |= rate->sdiv << SDIV_SHIFT;
+ writel_relaxed(tmp, pll->base + 4);
+
+ return 0;
+ }
+
+ /* Enable RST */
+ tmp = readl_relaxed(pll->base);
+ tmp &= ~RST_MASK;
+ writel_relaxed(tmp, pll->base);
+
+ div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) |
+ (rate->sdiv << SDIV_SHIFT);
+ writel_relaxed(div_val, pll->base + 0x4);
+ writel_relaxed(rate->kdiv << KDIV_SHIFT, pll->base + 0x8);
+
+ /*
+ * According to the spec, t3 - t2 needs to be greater than
+ * both 1us and 1/FREF.
+ * FREF is FIN / prediv; prediv ranges over [1, 63], so choose
+ * 3us.
+ */
+ udelay(3);
+
+ /* Disable RST */
+ tmp |= RST_MASK;
+ writel_relaxed(tmp, pll->base);
+
+ /* Wait Lock */
+ ret = clk_pll14xx_wait_lock(pll);
+ if (ret)
+ return ret;
+
+ /* Bypass */
+ tmp &= ~BYPASS_MASK;
+ writel_relaxed(tmp, pll->base);
+
+ return 0;
+}
+
+static int clk_pll14xx_prepare(struct clk_hw *hw)
+{
+ struct clk_pll14xx *pll = to_clk_pll14xx(hw);
+ u32 val;
+
+ /*
+ * Setting RESETB from 0 to 1 takes the PLL out of reset;
+ * it starts normal operation after the lock time
+ */
+ val = readl_relaxed(pll->base + GNRL_CTL);
+ val |= RST_MASK;
+ writel_relaxed(val, pll->base + GNRL_CTL);
+
+ return clk_pll14xx_wait_lock(pll);
+}
+
+static int clk_pll14xx_is_prepared(struct clk_hw *hw)
+{
+ struct clk_pll14xx *pll = to_clk_pll14xx(hw);
+ u32 val;
+
+ val = readl_relaxed(pll->base + GNRL_CTL);
+
+ return (val & RST_MASK) ? 1 : 0;
+}
+
+static void clk_pll14xx_unprepare(struct clk_hw *hw)
+{
+ struct clk_pll14xx *pll = to_clk_pll14xx(hw);
+ u32 val;
+
+ /*
+ * Setting RST to 0 enables power-down mode and
+ * resets every digital block
+ */
+ val = readl_relaxed(pll->base + GNRL_CTL);
+ val &= ~RST_MASK;
+ writel_relaxed(val, pll->base + GNRL_CTL);
+}
+
+static const struct clk_ops clk_pll1416x_ops = {
+ .prepare = clk_pll14xx_prepare,
+ .unprepare = clk_pll14xx_unprepare,
+ .is_prepared = clk_pll14xx_is_prepared,
+ .recalc_rate = clk_pll1416x_recalc_rate,
+ .round_rate = clk_pll14xx_round_rate,
+ .set_rate = clk_pll1416x_set_rate,
+};
+
+static const struct clk_ops clk_pll1416x_min_ops = {
+ .recalc_rate = clk_pll1416x_recalc_rate,
+};
+
+static const struct clk_ops clk_pll1443x_ops = {
+ .prepare = clk_pll14xx_prepare,
+ .unprepare = clk_pll14xx_unprepare,
+ .is_prepared = clk_pll14xx_is_prepared,
+ .recalc_rate = clk_pll1443x_recalc_rate,
+ .round_rate = clk_pll14xx_round_rate,
+ .set_rate = clk_pll1443x_set_rate,
+};
+
+struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
+ void __iomem *base,
+ const struct imx_pll14xx_clk *pll_clk)
+{
+ struct clk_pll14xx *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.flags = pll_clk->flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ switch (pll_clk->type) {
+ case PLL_1416X:
+ if (!pll_clk->rate_table)
+ init.ops = &clk_pll1416x_min_ops;
+ else
+ init.ops = &clk_pll1416x_ops;
+ break;
+ case PLL_1443X:
+ init.ops = &clk_pll1443x_ops;
+ break;
+ default:
+ pr_err("%s: Unknown pll type for pll clk %s\n",
+ __func__, name);
+ kfree(pll);
+ return ERR_PTR(-EINVAL);
+ }
+
+ pll->base = base;
+ pll->hw.init = &init;
+ pll->type = pll_clk->type;
+ pll->rate_table = pll_clk->rate_table;
+ pll->rate_count = pll_clk->rate_count;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register pll %s %lu\n",
+ __func__, name, PTR_ERR(clk));
+ kfree(pll);
+ }
+
+ return clk;
+}
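A registration sketch reusing the hypothetical table from the earlier example; the clock name, parent name, register offset and array index are all illustrative, a real provider passes its own dt-bindings index and per-SoC data:

        static const struct imx_pll14xx_clk example_pll = {
                .type       = PLL_1416X,
                .rate_table = example_1416x_tbl,
                .rate_count = ARRAY_SIZE(example_1416x_tbl),
        };

        /* in the SoC clock provider's probe path */
        clks[EXAMPLE_PLL] = imx_clk_pll14xx("example_pll", "osc_24m",
                                            anatop_base + 0x28, &example_pll);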
diff --git a/drivers/clk/imx/clk-sccg-pll.c b/drivers/clk/imx/clk-sccg-pll.c
index ee7752bace89..9dfd03a95557 100644
--- a/drivers/clk/imx/clk-sccg-pll.c
+++ b/drivers/clk/imx/clk-sccg-pll.c
@@ -25,87 +25,292 @@
#define PLL_DIVF2_MASK GENMASK(12, 7)
#define PLL_DIVR1_MASK GENMASK(27, 25)
#define PLL_DIVR2_MASK GENMASK(24, 19)
+#define PLL_DIVQ_MASK GENMASK(6, 1)
#define PLL_REF_MASK GENMASK(2, 0)
#define PLL_LOCK_MASK BIT(31)
#define PLL_PD_MASK BIT(7)
-#define OSC_25M 25000000
-#define OSC_27M 27000000
+/* These are the specification limits for the SSCG PLL */
+#define PLL_REF_MIN_FREQ 25000000UL
+#define PLL_REF_MAX_FREQ 235000000UL
-#define PLL_SCCG_LOCK_TIMEOUT 70
+#define PLL_STAGE1_MIN_FREQ 1600000000UL
+#define PLL_STAGE1_MAX_FREQ 2400000000UL
+
+#define PLL_STAGE1_REF_MIN_FREQ 25000000UL
+#define PLL_STAGE1_REF_MAX_FREQ 54000000UL
+
+#define PLL_STAGE2_MIN_FREQ 1200000000UL
+#define PLL_STAGE2_MAX_FREQ 2400000000UL
+
+#define PLL_STAGE2_REF_MIN_FREQ 54000000UL
+#define PLL_STAGE2_REF_MAX_FREQ 75000000UL
+
+#define PLL_OUT_MIN_FREQ 20000000UL
+#define PLL_OUT_MAX_FREQ 1200000000UL
+
+#define PLL_DIVR1_MAX 7
+#define PLL_DIVR2_MAX 63
+#define PLL_DIVF1_MAX 63
+#define PLL_DIVF2_MAX 63
+#define PLL_DIVQ_MAX 63
+
+#define PLL_BYPASS_NONE 0x0
+#define PLL_BYPASS1 0x2
+#define PLL_BYPASS2 0x1
+
+#define SSCG_PLL_BYPASS1_MASK BIT(5)
+#define SSCG_PLL_BYPASS2_MASK BIT(4)
+#define SSCG_PLL_BYPASS_MASK GENMASK(5, 4)
+
+#define PLL_SCCG_LOCK_TIMEOUT 70
+
+struct clk_sccg_pll_setup {
+ int divr1, divf1;
+ int divr2, divf2;
+ int divq;
+ int bypass;
+
+ uint64_t vco1;
+ uint64_t vco2;
+ uint64_t fout;
+ uint64_t ref;
+ uint64_t ref_div1;
+ uint64_t ref_div2;
+ uint64_t fout_request;
+ int fout_error;
+};
struct clk_sccg_pll {
struct clk_hw hw;
- void __iomem *base;
+ const struct clk_ops ops;
+
+ void __iomem *base;
+
+ struct clk_sccg_pll_setup setup;
+
+ u8 parent;
+ u8 bypass1;
+ u8 bypass2;
};
#define to_clk_sccg_pll(_hw) container_of(_hw, struct clk_sccg_pll, hw)
-static int clk_pll_wait_lock(struct clk_sccg_pll *pll)
+static int clk_sccg_pll_wait_lock(struct clk_sccg_pll *pll)
{
u32 val;
- return readl_poll_timeout(pll->base, val, val & PLL_LOCK_MASK, 0,
- PLL_SCCG_LOCK_TIMEOUT);
+ val = readl_relaxed(pll->base + PLL_CFG0);
+
+ /* don't wait for lock if all plls are bypassed */
+ if (!(val & SSCG_PLL_BYPASS2_MASK))
+ return readl_poll_timeout(pll->base, val, val & PLL_LOCK_MASK,
+ 0, PLL_SCCG_LOCK_TIMEOUT);
+
+ return 0;
}
-static int clk_pll1_is_prepared(struct clk_hw *hw)
+static int clk_sccg_pll2_check_match(struct clk_sccg_pll_setup *setup,
+ struct clk_sccg_pll_setup *temp_setup)
{
- struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
- u32 val;
+ int new_diff = temp_setup->fout - temp_setup->fout_request;
+ int diff = temp_setup->fout_error;
- val = readl_relaxed(pll->base + PLL_CFG0);
- return (val & PLL_PD_MASK) ? 0 : 1;
+ if (abs(diff) > abs(new_diff)) {
+ temp_setup->fout_error = new_diff;
+ memcpy(setup, temp_setup, sizeof(struct clk_sccg_pll_setup));
+
+ if (temp_setup->fout_request == temp_setup->fout)
+ return 0;
+ }
+ return -1;
}
-static unsigned long clk_pll1_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+static int clk_sccg_divq_lookup(struct clk_sccg_pll_setup *setup,
+ struct clk_sccg_pll_setup *temp_setup)
{
- struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
- u32 val, divf;
+ int ret = -EINVAL;
+
+ for (temp_setup->divq = 0; temp_setup->divq <= PLL_DIVQ_MAX;
+ temp_setup->divq++) {
+ temp_setup->vco2 = temp_setup->vco1;
+ do_div(temp_setup->vco2, temp_setup->divr2 + 1);
+ temp_setup->vco2 *= 2;
+ temp_setup->vco2 *= temp_setup->divf2 + 1;
+ if (temp_setup->vco2 >= PLL_STAGE2_MIN_FREQ &&
+ temp_setup->vco2 <= PLL_STAGE2_MAX_FREQ) {
+ temp_setup->fout = temp_setup->vco2;
+ do_div(temp_setup->fout, 2 * (temp_setup->divq + 1));
+
+ ret = clk_sccg_pll2_check_match(setup, temp_setup);
+ if (!ret) {
+ temp_setup->bypass = PLL_BYPASS1;
+ return ret;
+ }
+ }
+ }
- val = readl_relaxed(pll->base + PLL_CFG2);
- divf = FIELD_GET(PLL_DIVF1_MASK, val);
+ return ret;
+}
+
+static int clk_sccg_divf2_lookup(struct clk_sccg_pll_setup *setup,
+ struct clk_sccg_pll_setup *temp_setup)
+{
+ int ret = -EINVAL;
+
+ for (temp_setup->divf2 = 0; temp_setup->divf2 <= PLL_DIVF2_MAX;
+ temp_setup->divf2++) {
+ ret = clk_sccg_divq_lookup(setup, temp_setup);
+ if (!ret)
+ return ret;
+ }
- return parent_rate * 2 * (divf + 1);
+ return ret;
}
-static long clk_pll1_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_sccg_divr2_lookup(struct clk_sccg_pll_setup *setup,
+ struct clk_sccg_pll_setup *temp_setup)
{
- unsigned long parent_rate = *prate;
- u32 div;
+ int ret = -EINVAL;
+
+ for (temp_setup->divr2 = 0; temp_setup->divr2 <= PLL_DIVR2_MAX;
+ temp_setup->divr2++) {
+ temp_setup->ref_div2 = temp_setup->vco1;
+ do_div(temp_setup->ref_div2, temp_setup->divr2 + 1);
+ if (temp_setup->ref_div2 >= PLL_STAGE2_REF_MIN_FREQ &&
+ temp_setup->ref_div2 <= PLL_STAGE2_REF_MAX_FREQ) {
+ ret = clk_sccg_divf2_lookup(setup, temp_setup);
+ if (!ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int clk_sccg_pll2_find_setup(struct clk_sccg_pll_setup *setup,
+ struct clk_sccg_pll_setup *temp_setup,
+ uint64_t ref)
+{
+
+ int ret = -EINVAL;
- if (!parent_rate)
- return 0;
+ if (ref < PLL_STAGE1_MIN_FREQ || ref > PLL_STAGE1_MAX_FREQ)
+ return ret;
- div = rate / (parent_rate * 2);
+ temp_setup->vco1 = ref;
- return parent_rate * div * 2;
+ ret = clk_sccg_divr2_lookup(setup, temp_setup);
+ return ret;
}
-static int clk_pll1_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
+static int clk_sccg_divf1_lookup(struct clk_sccg_pll_setup *setup,
+ struct clk_sccg_pll_setup *temp_setup)
{
- struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
- u32 val;
- u32 divf;
+ int ret = -EINVAL;
- if (!parent_rate)
- return -EINVAL;
+ for (temp_setup->divf1 = 0; temp_setup->divf1 <= PLL_DIVF1_MAX;
+ temp_setup->divf1++) {
+ uint64_t vco1 = temp_setup->ref;
- divf = rate / (parent_rate * 2);
+ do_div(vco1, temp_setup->divr1 + 1);
+ vco1 *= 2;
+ vco1 *= temp_setup->divf1 + 1;
- val = readl_relaxed(pll->base + PLL_CFG2);
- val &= ~PLL_DIVF1_MASK;
- val |= FIELD_PREP(PLL_DIVF1_MASK, divf - 1);
- writel_relaxed(val, pll->base + PLL_CFG2);
+ ret = clk_sccg_pll2_find_setup(setup, temp_setup, vco1);
+ if (!ret) {
+ temp_setup->bypass = PLL_BYPASS_NONE;
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int clk_sccg_divr1_lookup(struct clk_sccg_pll_setup *setup,
+ struct clk_sccg_pll_setup *temp_setup)
+{
+ int ret = -EINVAL;
+
+ for (temp_setup->divr1 = 0; temp_setup->divr1 <= PLL_DIVR1_MAX;
+ temp_setup->divr1++) {
+ temp_setup->ref_div1 = temp_setup->ref;
+ do_div(temp_setup->ref_div1, temp_setup->divr1 + 1);
+ if (temp_setup->ref_div1 >= PLL_STAGE1_REF_MIN_FREQ &&
+ temp_setup->ref_div1 <= PLL_STAGE1_REF_MAX_FREQ) {
+ ret = clk_sccg_divf1_lookup(setup, temp_setup);
+ if (!ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int clk_sccg_pll1_find_setup(struct clk_sccg_pll_setup *setup,
+ struct clk_sccg_pll_setup *temp_setup,
+ uint64_t ref)
+{
+
+ int ret = -EINVAL;
+
+ if (ref < PLL_REF_MIN_FREQ || ref > PLL_REF_MAX_FREQ)
+ return ret;
+
+ temp_setup->ref = ref;
+
+ ret = clk_sccg_divr1_lookup(setup, temp_setup);
+
+ return ret;
+}
+
+static int clk_sccg_pll_find_setup(struct clk_sccg_pll_setup *setup,
+ uint64_t prate,
+ uint64_t rate, int try_bypass)
+{
+ struct clk_sccg_pll_setup temp_setup;
+ int ret = -EINVAL;
+
+ memset(&temp_setup, 0, sizeof(struct clk_sccg_pll_setup));
+ memset(setup, 0, sizeof(struct clk_sccg_pll_setup));
+
+ temp_setup.fout_error = PLL_OUT_MAX_FREQ;
+ temp_setup.fout_request = rate;
+
+ switch (try_bypass) {
- return clk_pll_wait_lock(pll);
+ case PLL_BYPASS2:
+ if (prate == rate) {
+ setup->bypass = PLL_BYPASS2;
+ setup->fout = rate;
+ ret = 0;
+ }
+ break;
+
+ case PLL_BYPASS1:
+ ret = clk_sccg_pll2_find_setup(setup, &temp_setup, prate);
+ break;
+
+ case PLL_BYPASS_NONE:
+ ret = clk_sccg_pll1_find_setup(setup, &temp_setup, prate);
+ break;
+ }
+
+ return ret;
+}
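The nested divr1 -> divf1 -> divr2 -> divf2 -> divq lookups above walk the whole divider space and keep the closest candidate in fout_error. A sketch of how the search might be exercised, with illustrative numbers only:

        struct clk_sccg_pll_setup setup;

        /* ask for 800 MHz from a 25 MHz reference without bypassing either stage */
        if (!clk_sccg_pll_find_setup(&setup, 25000000ULL, 800000000ULL,
                                     PLL_BYPASS_NONE))
                pr_debug("divr1=%d divf1=%d divr2=%d divf2=%d divq=%d fout=%llu\n",
                         setup.divr1, setup.divf1, setup.divr2, setup.divf2,
                         setup.divq, (unsigned long long)setup.fout);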
+
+
+static int clk_sccg_pll_is_prepared(struct clk_hw *hw)
+{
+ struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
+
+ u32 val = readl_relaxed(pll->base + PLL_CFG0);
+
+ return (val & PLL_PD_MASK) ? 0 : 1;
}
-static int clk_pll1_prepare(struct clk_hw *hw)
+static int clk_sccg_pll_prepare(struct clk_hw *hw)
{
struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
u32 val;
@@ -114,10 +319,10 @@ static int clk_pll1_prepare(struct clk_hw *hw)
val &= ~PLL_PD_MASK;
writel_relaxed(val, pll->base + PLL_CFG0);
- return clk_pll_wait_lock(pll);
+ return clk_sccg_pll_wait_lock(pll);
}
-static void clk_pll1_unprepare(struct clk_hw *hw)
+static void clk_sccg_pll_unprepare(struct clk_hw *hw)
{
struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
u32 val;
@@ -125,121 +330,208 @@ static void clk_pll1_unprepare(struct clk_hw *hw)
val = readl_relaxed(pll->base + PLL_CFG0);
val |= PLL_PD_MASK;
writel_relaxed(val, pll->base + PLL_CFG0);
-
}
-static unsigned long clk_pll2_recalc_rate(struct clk_hw *hw,
+static unsigned long clk_sccg_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
- u32 val, ref, divr1, divf1, divr2, divf2;
+ u32 val, divr1, divf1, divr2, divf2, divq;
u64 temp64;
- val = readl_relaxed(pll->base + PLL_CFG0);
- switch (FIELD_GET(PLL_REF_MASK, val)) {
- case 0:
- ref = OSC_25M;
- break;
- case 1:
- ref = OSC_27M;
- break;
- default:
- ref = OSC_25M;
- break;
- }
-
val = readl_relaxed(pll->base + PLL_CFG2);
divr1 = FIELD_GET(PLL_DIVR1_MASK, val);
divr2 = FIELD_GET(PLL_DIVR2_MASK, val);
divf1 = FIELD_GET(PLL_DIVF1_MASK, val);
divf2 = FIELD_GET(PLL_DIVF2_MASK, val);
-
- temp64 = ref * 2;
- temp64 *= (divf1 + 1) * (divf2 + 1);
-
- do_div(temp64, (divr1 + 1) * (divr2 + 1));
+ divq = FIELD_GET(PLL_DIVQ_MASK, val);
+
+ temp64 = parent_rate;
+
+ val = clk_readl(pll->base + PLL_CFG0);
+ if (val & SSCG_PLL_BYPASS2_MASK) {
+ temp64 = parent_rate;
+ } else if (val & SSCG_PLL_BYPASS1_MASK) {
+ temp64 *= divf2;
+ do_div(temp64, (divr2 + 1) * (divq + 1));
+ } else {
+ temp64 *= 2;
+ temp64 *= (divf1 + 1) * (divf2 + 1);
+ do_div(temp64, (divr1 + 1) * (divr2 + 1) * (divq + 1));
+ }
return temp64;
}
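To make the no-bypass branch concrete with illustrative dividers: a 25 MHz reference with divr1 = 0, divf1 = 31, divr2 = 21, divf2 = 10 and divq = 0 yields 25 MHz * 2 * 32 * 11 / (1 * 22 * 1) = 800 MHz, with the stage-1 VCO at 1600 MHz and the stage-2 VCO at roughly 1600 MHz, both inside the limits defined at the top of the file.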
-static long clk_pll2_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_sccg_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
- u32 div;
- unsigned long parent_rate = *prate;
+ struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
+ struct clk_sccg_pll_setup *setup = &pll->setup;
+ u32 val;
- if (!parent_rate)
- return 0;
+ /* set bypass here too since the parent might be the same */
+ val = clk_readl(pll->base + PLL_CFG0);
+ val &= ~SSCG_PLL_BYPASS_MASK;
+ val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, setup->bypass);
+ clk_writel(val, pll->base + PLL_CFG0);
- div = rate / parent_rate;
+ val = readl_relaxed(pll->base + PLL_CFG2);
+ val &= ~(PLL_DIVF1_MASK | PLL_DIVF2_MASK);
+ val &= ~(PLL_DIVR1_MASK | PLL_DIVR2_MASK | PLL_DIVQ_MASK);
+ val |= FIELD_PREP(PLL_DIVF1_MASK, setup->divf1);
+ val |= FIELD_PREP(PLL_DIVF2_MASK, setup->divf2);
+ val |= FIELD_PREP(PLL_DIVR1_MASK, setup->divr1);
+ val |= FIELD_PREP(PLL_DIVR2_MASK, setup->divr2);
+ val |= FIELD_PREP(PLL_DIVQ_MASK, setup->divq);
+ writel_relaxed(val, pll->base + PLL_CFG2);
- return parent_rate * div;
+ return clk_sccg_pll_wait_lock(pll);
}
-static int clk_pll2_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
+static u8 clk_sccg_pll_get_parent(struct clk_hw *hw)
{
+ struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
u32 val;
- u32 divf;
+ u8 ret = pll->parent;
+
+ val = clk_readl(pll->base + PLL_CFG0);
+ if (val & SSCG_PLL_BYPASS2_MASK)
+ ret = pll->bypass2;
+ else if (val & SSCG_PLL_BYPASS1_MASK)
+ ret = pll->bypass1;
+ return ret;
+}
+
+static int clk_sccg_pll_set_parent(struct clk_hw *hw, u8 index)
+{
struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
+ u32 val;
- if (!parent_rate)
- return -EINVAL;
+ val = clk_readl(pll->base + PLL_CFG0);
+ val &= ~SSCG_PLL_BYPASS_MASK;
+ val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, pll->setup.bypass);
+ clk_writel(val, pll->base + PLL_CFG0);
- divf = rate / parent_rate;
+ return clk_sccg_pll_wait_lock(pll);
+}
- val = readl_relaxed(pll->base + PLL_CFG2);
- val &= ~PLL_DIVF2_MASK;
- val |= FIELD_PREP(PLL_DIVF2_MASK, divf - 1);
- writel_relaxed(val, pll->base + PLL_CFG2);
+static int __clk_sccg_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req,
+ uint64_t min,
+ uint64_t max,
+ uint64_t rate,
+ int bypass)
+{
+ struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
+ struct clk_sccg_pll_setup *setup = &pll->setup;
+ struct clk_hw *parent_hw = NULL;
+ int bypass_parent_index;
+ int ret = -EINVAL;
+
+ req->max_rate = max;
+ req->min_rate = min;
+
+ switch (bypass) {
+ case PLL_BYPASS2:
+ bypass_parent_index = pll->bypass2;
+ break;
+ case PLL_BYPASS1:
+ bypass_parent_index = pll->bypass1;
+ break;
+ default:
+ bypass_parent_index = pll->parent;
+ break;
+ }
+
+ parent_hw = clk_hw_get_parent_by_index(hw, bypass_parent_index);
+ ret = __clk_determine_rate(parent_hw, req);
+ if (!ret) {
+ ret = clk_sccg_pll_find_setup(setup, req->rate,
+ rate, bypass);
+ }
+
+ req->best_parent_hw = parent_hw;
+ req->best_parent_rate = req->rate;
+ req->rate = setup->fout;
- return clk_pll_wait_lock(pll);
+ return ret;
}
-static const struct clk_ops clk_sccg_pll1_ops = {
- .is_prepared = clk_pll1_is_prepared,
- .recalc_rate = clk_pll1_recalc_rate,
- .round_rate = clk_pll1_round_rate,
- .set_rate = clk_pll1_set_rate,
-};
+static int clk_sccg_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
+ struct clk_sccg_pll_setup *setup = &pll->setup;
+ uint64_t rate = req->rate;
+ uint64_t min = req->min_rate;
+ uint64_t max = req->max_rate;
+ int ret = -EINVAL;
+
+ if (rate < PLL_OUT_MIN_FREQ || rate > PLL_OUT_MAX_FREQ)
+ return ret;
+
+ ret = __clk_sccg_pll_determine_rate(hw, req, req->rate, req->rate,
+ rate, PLL_BYPASS2);
+ if (!ret)
+ return ret;
+
+ ret = __clk_sccg_pll_determine_rate(hw, req, PLL_STAGE1_REF_MIN_FREQ,
+ PLL_STAGE1_REF_MAX_FREQ, rate,
+ PLL_BYPASS1);
+ if (!ret)
+ return ret;
+
+ ret = __clk_sccg_pll_determine_rate(hw, req, PLL_REF_MIN_FREQ,
+ PLL_REF_MAX_FREQ, rate,
+ PLL_BYPASS_NONE);
+ if (!ret)
+ return ret;
+
+ if (setup->fout >= min && setup->fout <= max)
+ ret = 0;
+
+ return ret;
+}
-static const struct clk_ops clk_sccg_pll2_ops = {
- .prepare = clk_pll1_prepare,
- .unprepare = clk_pll1_unprepare,
- .recalc_rate = clk_pll2_recalc_rate,
- .round_rate = clk_pll2_round_rate,
- .set_rate = clk_pll2_set_rate,
+static const struct clk_ops clk_sccg_pll_ops = {
+ .prepare = clk_sccg_pll_prepare,
+ .unprepare = clk_sccg_pll_unprepare,
+ .is_prepared = clk_sccg_pll_is_prepared,
+ .recalc_rate = clk_sccg_pll_recalc_rate,
+ .set_rate = clk_sccg_pll_set_rate,
+ .set_parent = clk_sccg_pll_set_parent,
+ .get_parent = clk_sccg_pll_get_parent,
+ .determine_rate = clk_sccg_pll_determine_rate,
};
struct clk *imx_clk_sccg_pll(const char *name,
- const char *parent_name,
+ const char * const *parent_names,
+ u8 num_parents,
+ u8 parent, u8 bypass1, u8 bypass2,
void __iomem *base,
- enum imx_sccg_pll_type pll_type)
+ unsigned long flags)
{
struct clk_sccg_pll *pll;
struct clk_init_data init;
struct clk_hw *hw;
int ret;
- switch (pll_type) {
- case SCCG_PLL1:
- init.ops = &clk_sccg_pll1_ops;
- break;
- case SCCG_PLL2:
- init.ops = &clk_sccg_pll2_ops;
- break;
- default:
- return ERR_PTR(-EINVAL);
- }
-
pll = kzalloc(sizeof(*pll), GFP_KERNEL);
if (!pll)
return ERR_PTR(-ENOMEM);
+ pll->parent = parent;
+ pll->bypass1 = bypass1;
+ pll->bypass2 = bypass2;
+
+ pll->base = base;
init.name = name;
- init.flags = 0;
- init.parent_names = &parent_name;
- init.num_parents = 1;
+ init.ops = &clk_sccg_pll_ops;
+
+ init.flags = flags;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
pll->base = base;
pll->hw.init = &init;
diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c
index 7ccf7edfe11c..fbef740704d0 100644
--- a/drivers/clk/imx/clk-scu.c
+++ b/drivers/clk/imx/clk-scu.c
@@ -4,12 +4,17 @@
* Dong Aisheng <aisheng.dong@nxp.com>
*/
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/arm-smccc.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/slab.h>
#include "clk-scu.h"
+#define IMX_SIP_CPUFREQ 0xC2000001
+#define IMX_SIP_SET_CPUFREQ 0x00
+
static struct imx_sc_ipc *ccm_ipc_handle;
/*
@@ -66,6 +71,41 @@ struct imx_sc_msg_get_clock_rate {
};
/*
+ * struct imx_sc_msg_get_clock_parent - clock get parent protocol
+ * @hdr: SCU protocol header
+ * @req: get parent request protocol
+ * @resp: get parent response protocol
+ *
+ * This structure describes the SCU protocol for getting the parent of a clock
+ */
+struct imx_sc_msg_get_clock_parent {
+ struct imx_sc_rpc_msg hdr;
+ union {
+ struct req_get_clock_parent {
+ __le16 resource;
+ u8 clk;
+ } __packed req;
+ struct resp_get_clock_parent {
+ u8 parent;
+ } resp;
+ } data;
+};
+
+/*
+ * struct imx_sc_msg_set_clock_parent - clock set parent protocol
+ * @hdr: SCU protocol header
+ * @req: set parent request protocol
+ *
+ * This structure describes the SCU protocol for setting the parent of a clock
+ */
+struct imx_sc_msg_set_clock_parent {
+ struct imx_sc_rpc_msg hdr;
+ __le16 resource;
+ u8 clk;
+ u8 parent;
+} __packed;
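Both messages are sized in 32-bit words once filled in below: the RPC header occupies one word and the packed resource/clk(/parent) payload another, which is why the helpers that follow set hdr->size to 2.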
+
+/*
* struct imx_sc_msg_req_clock_enable - clock gate protocol
* @hdr: SCU protocol header
* @resource: clock resource to gate
@@ -145,6 +185,25 @@ static long clk_scu_round_rate(struct clk_hw *hw, unsigned long rate,
return rate;
}
+static int clk_scu_atf_set_cpu_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_scu *clk = to_clk_scu(hw);
+ struct arm_smccc_res res;
+ unsigned long cluster_id;
+
+ if (clk->rsrc_id == IMX_SC_R_A35)
+ cluster_id = 0;
+ else
+ return -EINVAL;
+
+ /* CPU frequency scaling can ONLY be done by ARM-Trusted-Firmware */
+ arm_smccc_smc(IMX_SIP_CPUFREQ, IMX_SIP_SET_CPUFREQ,
+ cluster_id, rate, 0, 0, 0, 0, &res);
+
+ return 0;
+}
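With this op in place a cpufreq driver never programs the CCM itself; an ordinary clk_set_rate() on the A35 clock is routed into the SIP call above. A sketch of that consumer path, with the node pointer and clock name purely illustrative:

        struct clk *a35_clk = of_clk_get_by_name(np, "a35");

        if (!IS_ERR(a35_clk))
                clk_set_rate(a35_clk, 1200000000UL);   /* ends up in the IMX_SIP_CPUFREQ SMC */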
+
/*
* clk_scu_set_rate - Set rate for a SCU clock
* @hw: clock to change rate for
@@ -173,6 +232,49 @@ static int clk_scu_set_rate(struct clk_hw *hw, unsigned long rate,
return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
}
+static u8 clk_scu_get_parent(struct clk_hw *hw)
+{
+ struct clk_scu *clk = to_clk_scu(hw);
+ struct imx_sc_msg_get_clock_parent msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ int ret;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_PM;
+ hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_PARENT;
+ hdr->size = 2;
+
+ msg.data.req.resource = cpu_to_le16(clk->rsrc_id);
+ msg.data.req.clk = clk->clk_type;
+
+ ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
+ if (ret) {
+ pr_err("%s: failed to get clock parent %d\n",
+ clk_hw_get_name(hw), ret);
+ return 0;
+ }
+
+ return msg.data.resp.parent;
+}
+
+static int clk_scu_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_scu *clk = to_clk_scu(hw);
+ struct imx_sc_msg_set_clock_parent msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_PM;
+ hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_PARENT;
+ hdr->size = 2;
+
+ msg.resource = cpu_to_le16(clk->rsrc_id);
+ msg.clk = clk->clk_type;
+ msg.parent = index;
+
+ return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
+}
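Reparenting thus becomes a plain RPC to the SCU instead of a register write, and consumers keep using the normal clk API. A sketch assuming both clocks are obtained from DT (the connection ids are illustrative):

        struct clk *per_clk    = devm_clk_get(dev, "per");
        struct clk *new_parent = devm_clk_get(dev, "sel");

        if (!IS_ERR(per_clk) && !IS_ERR(new_parent))
                clk_set_parent(per_clk, new_parent);   /* lands in clk_scu_set_parent() */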
+
static int sc_pm_clock_enable(struct imx_sc_ipc *ipc, u16 resource,
u8 clk, bool enable, bool autog)
{
@@ -228,11 +330,22 @@ static const struct clk_ops clk_scu_ops = {
.recalc_rate = clk_scu_recalc_rate,
.round_rate = clk_scu_round_rate,
.set_rate = clk_scu_set_rate,
+ .get_parent = clk_scu_get_parent,
+ .set_parent = clk_scu_set_parent,
+ .prepare = clk_scu_prepare,
+ .unprepare = clk_scu_unprepare,
+};
+
+static const struct clk_ops clk_scu_cpu_ops = {
+ .recalc_rate = clk_scu_recalc_rate,
+ .round_rate = clk_scu_round_rate,
+ .set_rate = clk_scu_atf_set_cpu_rate,
.prepare = clk_scu_prepare,
.unprepare = clk_scu_unprepare,
};
-struct clk_hw *imx_clk_scu(const char *name, u32 rsrc_id, u8 clk_type)
+struct clk_hw *__imx_clk_scu(const char *name, const char * const *parents,
+ int num_parents, u32 rsrc_id, u8 clk_type)
{
struct clk_init_data init;
struct clk_scu *clk;
@@ -248,7 +361,13 @@ struct clk_hw *imx_clk_scu(const char *name, u32 rsrc_id, u8 clk_type)
init.name = name;
init.ops = &clk_scu_ops;
- init.num_parents = 0;
+ if (rsrc_id == IMX_SC_R_A35)
+ init.ops = &clk_scu_cpu_ops;
+ else
+ init.ops = &clk_scu_ops;
+ init.parent_names = parents;
+ init.num_parents = num_parents;
+
/*
* Note on MX8, the clocks are tightly coupled with power domain
* that once the power domain is off, the clock status may be
diff --git a/drivers/clk/imx/clk-scu.h b/drivers/clk/imx/clk-scu.h
index 52c1746ec988..2bcfaf06a458 100644
--- a/drivers/clk/imx/clk-scu.h
+++ b/drivers/clk/imx/clk-scu.h
@@ -10,7 +10,21 @@
#include <linux/firmware/imx/sci.h>
int imx_clk_scu_init(void);
-struct clk_hw *imx_clk_scu(const char *name, u32 rsrc_id, u8 clk_type);
+
+struct clk_hw *__imx_clk_scu(const char *name, const char * const *parents,
+ int num_parents, u32 rsrc_id, u8 clk_type);
+
+static inline struct clk_hw *imx_clk_scu(const char *name, u32 rsrc_id,
+ u8 clk_type)
+{
+ return __imx_clk_scu(name, NULL, 0, rsrc_id, clk_type);
+}
+
+static inline struct clk_hw *imx_clk_scu2(const char *name, const char * const *parents,
+ int num_parents, u32 rsrc_id, u8 clk_type)
+{
+ return __imx_clk_scu(name, parents, num_parents, rsrc_id, clk_type);
+}
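A usage sketch for the two wrappers; the array index, clock names and parent list are illustrative, and IMX_SC_R_UART_0 / IMX_SC_PM_CLK_PER merely stand in for a real resource/clock-type pair:

        static const char * const example_sels[] = { "example_div", "example_bypass", };

        /* no selectable parent, the SCU fully owns the clock */
        hws[EXAMPLE_CLK_PER] = imx_clk_scu("example_per", IMX_SC_R_UART_0,
                                           IMX_SC_PM_CLK_PER);
        /* parents exposed so the framework can reach clk_scu_set_parent() */
        hws[EXAMPLE_CLK_SEL] = imx_clk_scu2("example_sel", example_sels,
                                            ARRAY_SIZE(example_sels),
                                            IMX_SC_R_UART_0, IMX_SC_PM_CLK_PER);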
struct clk_hw *imx_clk_lpcg_scu(const char *name, const char *parent_name,
unsigned long flags, void __iomem *reg,
diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c
index 6dae54325a91..a334667c450a 100644
--- a/drivers/clk/imx/clk-vf610.c
+++ b/drivers/clk/imx/clk-vf610.c
@@ -203,6 +203,7 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
np = of_find_compatible_node(NULL, NULL, "fsl,vf610-anatop");
anatop_base = of_iomap(np, 0);
BUG_ON(!anatop_base);
+ of_node_put(np);
np = ccm_node;
ccm_base = of_iomap(np, 0);
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index 028312de21b8..5748ec8673e4 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -27,6 +27,30 @@ enum imx_sccg_pll_type {
SCCG_PLL2,
};
+enum imx_pll14xx_type {
+ PLL_1416X,
+ PLL_1443X,
+};
+
+/* NOTE: Rate table should be kept sorted in descending order. */
+struct imx_pll14xx_rate_table {
+ unsigned int rate;
+ unsigned int pdiv;
+ unsigned int mdiv;
+ unsigned int sdiv;
+ unsigned int kdiv;
+};
+
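The note above requires the table to stay sorted by rate, highest first. A sketch of what such a table could look like (all rates and divider values below are made up for illustration, not taken from any datasheet):

/* Hypothetical example, highest rate first as required by the NOTE above. */
static const struct imx_pll14xx_rate_table example_pll1443x_tbl[] = {
	{ .rate = 786432000U, .pdiv = 3, .mdiv = 98, .sdiv = 0, .kdiv = 19661 },
	{ .rate = 722534400U, .pdiv = 2, .mdiv = 60, .sdiv = 1, .kdiv =  4424 },
	{ .rate = 650000000U, .pdiv = 3, .mdiv = 81, .sdiv = 0, .kdiv =  8192 },
};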
+struct imx_pll14xx_clk {
+ enum imx_pll14xx_type type;
+ const struct imx_pll14xx_rate_table *rate_table;
+ int rate_count;
+ int flags;
+};
+
+struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
+ void __iomem *base, const struct imx_pll14xx_clk *pll_clk);
+
struct clk *imx_clk_pllv1(enum imx_pllv1_type type, const char *name,
const char *parent, void __iomem *base);
@@ -36,9 +60,12 @@ struct clk *imx_clk_pllv2(const char *name, const char *parent,
struct clk *imx_clk_frac_pll(const char *name, const char *parent_name,
void __iomem *base);
-struct clk *imx_clk_sccg_pll(const char *name, const char *parent_name,
- void __iomem *base,
- enum imx_sccg_pll_type pll_type);
+struct clk *imx_clk_sccg_pll(const char *name,
+ const char * const *parent_names,
+ u8 num_parents,
+ u8 parent, u8 bypass1, u8 bypass2,
+ void __iomem *base,
+ unsigned long flags);
enum imx_pllv3_type {
IMX_PLLV3_GENERIC,
@@ -329,7 +356,8 @@ static inline struct clk *imx_clk_mux_flags(const char *name,
}
static inline struct clk *imx_clk_mux2_flags(const char *name,
- void __iomem *reg, u8 shift, u8 width, const char **parents,
+ void __iomem *reg, u8 shift, u8 width,
+ const char * const *parents,
int num_parents, unsigned long flags)
{
return clk_register_mux(NULL, name, parents, num_parents,
@@ -354,7 +382,7 @@ struct clk *imx_clk_cpu(const char *name, const char *parent_name,
struct clk *step);
struct clk *imx8m_clk_composite_flags(const char *name,
- const char **parent_names,
+ const char * const *parent_names,
int num_parents, void __iomem *reg,
unsigned long flags);
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index 5ef7d9ba2195..510b685212d3 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -83,7 +83,7 @@ ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
const struct ingenic_cgu_clk_info *clk_info;
const struct ingenic_cgu_pll_info *pll_info;
unsigned m, n, od_enc, od;
- bool bypass, enable;
+ bool bypass;
unsigned long flags;
u32 ctl;
@@ -103,7 +103,6 @@ ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
od_enc &= GENMASK(pll_info->od_bits - 1, 0);
bypass = !pll_info->no_bypass_bit &&
!!(ctl & BIT(pll_info->bypass_bit));
- enable = !!(ctl & BIT(pll_info->enable_bit));
if (bypass)
return parent_rate;
@@ -426,16 +425,16 @@ ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
struct ingenic_cgu *cgu = ingenic_clk->cgu;
const struct ingenic_cgu_clk_info *clk_info;
- long rate = *parent_rate;
+ unsigned int div = 1;
clk_info = &cgu->clock_info[ingenic_clk->idx];
if (clk_info->type & CGU_CLK_DIV)
- rate /= ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
+ div = ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
else if (clk_info->type & CGU_CLK_FIXDIV)
- rate /= clk_info->fixdiv.div;
+ div = clk_info->fixdiv.div;
- return rate;
+ return DIV_ROUND_UP(*parent_rate, div);
}
static int
@@ -455,7 +454,7 @@ ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
if (clk_info->type & CGU_CLK_DIV) {
div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate);
- rate = parent_rate / div;
+ rate = DIV_ROUND_UP(parent_rate, div);
if (rate != req_rate)
return -EINVAL;
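The change from truncating division to DIV_ROUND_UP makes round_rate() and set_rate() round the same way, so a rate returned by the former is accepted by the latter even when the parent rate is not an exact multiple of the divider. A standalone illustration with arbitrary numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long parent_rate = 200000000UL;	/* 200 MHz parent */
	unsigned int div = 3;

	printf("truncated : %lu Hz\n", parent_rate / div);		/* 66666666 */
	printf("rounded up: %lu Hz\n", DIV_ROUND_UP(parent_rate, div));	/* 66666667 */
	return 0;
}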
diff --git a/drivers/clk/ingenic/cgu.h b/drivers/clk/ingenic/cgu.h
index 502bcbb61b04..e12716d8ce3c 100644
--- a/drivers/clk/ingenic/cgu.h
+++ b/drivers/clk/ingenic/cgu.h
@@ -80,7 +80,7 @@ struct ingenic_cgu_mux_info {
* @reg: offset of the divider control register within the CGU
* @shift: number of bits to left shift the divide value by (ie. the index of
* the lowest bit of the divide value within its control register)
- * @div: number of bits to divide the divider value by (i.e. if the
+ * @div: number to divide the divider value by (i.e. if the
* effective divider value is the value written to the register
* multiplied by some constant)
* @bits: the size of the divide value in bits
diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c
index 4479c102e899..b86edd328249 100644
--- a/drivers/clk/ingenic/jz4740-cgu.c
+++ b/drivers/clk/ingenic/jz4740-cgu.c
@@ -165,7 +165,7 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = {
.parents = { JZ4740_CLK_EXT, JZ4740_CLK_PLL_HALF, -1, -1 },
.mux = { CGU_REG_CPCCR, 29, 1 },
.div = { CGU_REG_CPCCR, 23, 1, 6, -1, -1, -1 },
- .gate = { CGU_REG_SCR, 6 },
+ .gate = { CGU_REG_SCR, 6, true },
},
/* Gate-only clocks */
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
index 934bf0e45e26..9628d4e7690b 100644
--- a/drivers/clk/mediatek/clk-gate.c
+++ b/drivers/clk/mediatek/clk-gate.c
@@ -157,7 +157,8 @@ struct clk *mtk_clk_register_gate(
int clr_ofs,
int sta_ofs,
u8 bit,
- const struct clk_ops *ops)
+ const struct clk_ops *ops,
+ unsigned long flags)
{
struct mtk_clk_gate *cg;
struct clk *clk;
@@ -172,6 +173,7 @@ struct clk *mtk_clk_register_gate(
init.parent_names = parent_name ? &parent_name : NULL;
init.num_parents = parent_name ? 1 : 0;
init.ops = ops;
+ init.flags = flags;
cg->regmap = regmap;
cg->set_ofs = set_ofs;
diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h
index 72ef89b3ad7b..9f766dfe1d57 100644
--- a/drivers/clk/mediatek/clk-gate.h
+++ b/drivers/clk/mediatek/clk-gate.h
@@ -47,6 +47,7 @@ struct clk *mtk_clk_register_gate(
int clr_ofs,
int sta_ofs,
u8 bit,
- const struct clk_ops *ops);
+ const struct clk_ops *ops,
+ unsigned long flags);
#endif /* __DRV_CLK_GATE_H */
diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
index ab6ab07f53e6..905a2316f6a7 100644
--- a/drivers/clk/mediatek/clk-mt2701.c
+++ b/drivers/clk/mediatek/clk-mt2701.c
@@ -535,8 +535,8 @@ static const struct mtk_composite top_muxes[] = {
0x0080, 8, 2, 15),
MUX_GATE(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_parents,
0x0080, 16, 3, 23),
- MUX_GATE(CLK_TOP_DPI1_SEL, "dpi1_sel", dpi1_parents,
- 0x0080, 24, 2, 31),
+ MUX_GATE_FLAGS_2(CLK_TOP_DPI1_SEL, "dpi1_sel", dpi1_parents,
+ 0x0080, 24, 2, 31, 0, CLK_MUX_ROUND_CLOSEST),
MUX_GATE(CLK_TOP_TVE_SEL, "tve_sel", tve_parents,
0x0090, 0, 3, 7),
diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c
index 991d4093726e..b09cb3d99f66 100644
--- a/drivers/clk/mediatek/clk-mt2712.c
+++ b/drivers/clk/mediatek/clk-mt2712.c
@@ -223,6 +223,8 @@ static const struct mtk_fixed_factor top_divs[] = {
4),
FACTOR(CLK_TOP_APLL1_D3, "apll1_d3", "apll1_ck", 1,
3),
+ FACTOR(CLK_TOP_APLL2_D3, "apll2_d3", "apll2_ck", 1,
+ 3),
};
static const char * const axi_parents[] = {
@@ -594,7 +596,8 @@ static const char * const a1sys_hp_parents[] = {
"apll1_ck",
"apll1_d2",
"apll1_d4",
- "apll1_d8"
+ "apll1_d8",
+ "apll1_d3"
};
static const char * const a2sys_hp_parents[] = {
@@ -602,7 +605,8 @@ static const char * const a2sys_hp_parents[] = {
"apll2_ck",
"apll2_d2",
"apll2_d4",
- "apll2_d8"
+ "apll2_d8",
+ "apll2_d3"
};
static const char * const asm_l_parents[] = {
@@ -1463,7 +1467,6 @@ static struct platform_driver clk_mt2712_drv = {
.probe = clk_mt2712_probe,
.driver = {
.name = "clk-mt2712",
- .owner = THIS_MODULE,
.of_match_table = of_match_clk_mt2712,
},
};
diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c
index 5702bc974ed9..c2b46b184b9a 100644
--- a/drivers/clk/mediatek/clk-mt6797.c
+++ b/drivers/clk/mediatek/clk-mt6797.c
@@ -324,6 +324,10 @@ static const char * const anc_md32_parents[] = {
"univpll_d5",
};
+/*
+ * Clock mux ddrphycfg is needed by the DRAM controller. We mark it as
+ * critical as otherwise the system will hang after boot.
+ */
static const struct mtk_composite top_muxes[] = {
MUX(CLK_TOP_MUX_ULPOSC_AXI_CK_MUX_PRE, "ulposc_axi_ck_mux_pre",
ulposc_axi_ck_mux_pre_parents, 0x0040, 3, 1),
@@ -331,8 +335,8 @@ static const struct mtk_composite top_muxes[] = {
ulposc_axi_ck_mux_parents, 0x0040, 2, 1),
MUX(CLK_TOP_MUX_AXI, "axi_sel", axi_parents,
0x0040, 0, 2),
- MUX(CLK_TOP_MUX_DDRPHYCFG, "ddrphycfg_sel", ddrphycfg_parents,
- 0x0040, 16, 2),
+ MUX_FLAGS(CLK_TOP_MUX_DDRPHYCFG, "ddrphycfg_sel", ddrphycfg_parents,
+ 0x0040, 16, 2, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
MUX(CLK_TOP_MUX_MM, "mm_sel", mm_parents,
0x0040, 24, 2),
MUX_GATE(CLK_TOP_MUX_PWM, "pwm_sel", pwm_parents, 0x0050, 0, 3, 7),
@@ -424,33 +428,45 @@ static const struct mtk_gate_regs infra2_cg_regs = {
.sta_ofs = 0x00b0,
};
-#define GATE_ICG0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &infra0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
+#define GATE_ICG0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &infra0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
}
-#define GATE_ICG1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &infra1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
+#define GATE_ICG1(_id, _name, _parent, _shift) \
+ GATE_ICG1_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_ICG1_FLAGS(_id, _name, _parent, _shift, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &infra1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ .flags = _flags, \
}
-#define GATE_ICG2(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &infra2_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
+#define GATE_ICG2(_id, _name, _parent, _shift) \
+ GATE_ICG2_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_ICG2_FLAGS(_id, _name, _parent, _shift, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &infra2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ .flags = _flags, \
}
+/*
+ * Clock gates dramc and dramc_b are needed by the DRAM controller.
+ * We mark them as critical as otherwise the system will hang after boot.
+ */
static const struct mtk_gate infra_clks[] = {
GATE_ICG0(CLK_INFRA_PMIC_TMR, "infra_pmic_tmr", "ulposc", 0),
GATE_ICG0(CLK_INFRA_PMIC_AP, "infra_pmic_ap", "pmicspi_sel", 1),
@@ -505,7 +521,8 @@ static const struct mtk_gate infra_clks[] = {
GATE_ICG1(CLK_INFRA_CCIF_AP, "infra_ccif_ap", "axi_sel", 23),
GATE_ICG1(CLK_INFRA_AUDIO, "infra_audio", "axi_sel", 25),
GATE_ICG1(CLK_INFRA_CCIF_MD, "infra_ccif_md", "axi_sel", 26),
- GATE_ICG1(CLK_INFRA_DRAMC_F26M, "infra_dramc_f26m", "clk26m", 31),
+ GATE_ICG1_FLAGS(CLK_INFRA_DRAMC_F26M, "infra_dramc_f26m",
+ "clk26m", 31, CLK_IS_CRITICAL),
GATE_ICG2(CLK_INFRA_I2C4, "infra_i2c4", "axi_sel", 0),
GATE_ICG2(CLK_INFRA_I2C_APPM, "infra_i2c_appm", "axi_sel", 1),
GATE_ICG2(CLK_INFRA_I2C_GPUPM, "infra_i2c_gpupm", "axi_sel", 2),
@@ -516,7 +533,8 @@ static const struct mtk_gate infra_clks[] = {
GATE_ICG2(CLK_INFRA_I2C5, "infra_i2c5", "axi_sel", 7),
GATE_ICG2(CLK_INFRA_SYS_CIRQ, "infra_sys_cirq", "axi_sel", 8),
GATE_ICG2(CLK_INFRA_SPI1, "infra_spi1", "spi_sel", 10),
- GATE_ICG2(CLK_INFRA_DRAMC_B_F26M, "infra_dramc_b_f26m", "clk26m", 11),
+ GATE_ICG2_FLAGS(CLK_INFRA_DRAMC_B_F26M, "infra_dramc_b_f26m",
+ "clk26m", 11, CLK_IS_CRITICAL),
GATE_ICG2(CLK_INFRA_ANC_MD32, "infra_anc_md32", "anc_md32_sel", 12),
GATE_ICG2(CLK_INFRA_ANC_MD32_32K, "infra_anc_md32_32k", "clk26m", 13),
GATE_ICG2(CLK_INFRA_DVFS_SPM1, "infra_dvfs_spm1", "axi_sel", 15),
diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c
index 96c292c3e440..deedeb3ea33b 100644
--- a/drivers/clk/mediatek/clk-mt8173.c
+++ b/drivers/clk/mediatek/clk-mt8173.c
@@ -533,7 +533,7 @@ static const char * const ca53_parents[] __initconst = {
"univpll"
};
-static const char * const ca57_parents[] __initconst = {
+static const char * const ca72_parents[] __initconst = {
"clk26m",
"armca15pll",
"mainpll",
@@ -542,7 +542,7 @@ static const char * const ca57_parents[] __initconst = {
static const struct mtk_composite cpu_muxes[] __initconst = {
MUX(CLK_INFRA_CA53SEL, "infra_ca53_sel", ca53_parents, 0x0000, 0, 2),
- MUX(CLK_INFRA_CA57SEL, "infra_ca57_sel", ca57_parents, 0x0000, 2, 2),
+ MUX(CLK_INFRA_CA72SEL, "infra_ca72_sel", ca72_parents, 0x0000, 2, 2),
};
static const struct mtk_composite top_muxes[] __initconst = {
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index 9c0ae4278a94..5531dd2e496d 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -130,7 +130,7 @@ int mtk_clk_register_gates(struct device_node *node,
gate->regs->set_ofs,
gate->regs->clr_ofs,
gate->regs->sta_ofs,
- gate->shift, gate->ops);
+ gate->shift, gate->ops, gate->flags);
if (IS_ERR(clk)) {
pr_err("Failed to register clk %s: %ld\n",
@@ -167,7 +167,7 @@ struct clk *mtk_clk_register_composite(const struct mtk_composite *mc,
mux->mask = BIT(mc->mux_width) - 1;
mux->shift = mc->mux_shift;
mux->lock = lock;
-
+ mux->flags = mc->mux_flags;
mux_hw = &mux->hw;
mux_ops = &clk_mux_ops;
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index f83c2bbb677e..fb27b5bf30d9 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -81,15 +81,13 @@ struct mtk_composite {
signed char divider_shift;
signed char divider_width;
+ u8 mux_flags;
+
signed char num_parents;
};
-/*
- * In case the rate change propagation to parent clocks is undesirable,
- * this macro allows to specify the clock flags manually.
- */
-#define MUX_GATE_FLAGS(_id, _name, _parents, _reg, _shift, _width, \
- _gate, _flags) { \
+#define MUX_GATE_FLAGS_2(_id, _name, _parents, _reg, _shift, \
+ _width, _gate, _flags, _muxflags) { \
.id = _id, \
.name = _name, \
.mux_reg = _reg, \
@@ -101,9 +99,19 @@ struct mtk_composite {
.parent_names = _parents, \
.num_parents = ARRAY_SIZE(_parents), \
.flags = _flags, \
+ .mux_flags = _muxflags, \
}
/*
+ * In case the rate change propagation to parent clocks is undesirable,
+ * this macro allows the clock flags to be specified manually.
+ */
+#define MUX_GATE_FLAGS(_id, _name, _parents, _reg, _shift, _width, \
+ _gate, _flags) \
+ MUX_GATE_FLAGS_2(_id, _name, _parents, _reg, \
+ _shift, _width, _gate, _flags, 0)
+
+/*
* Unless necessary, all MUX_GATE clocks propagate rate changes to their
* parent clock by default.
*/
@@ -111,7 +119,11 @@ struct mtk_composite {
MUX_GATE_FLAGS(_id, _name, _parents, _reg, _shift, _width, \
_gate, CLK_SET_RATE_PARENT)
-#define MUX(_id, _name, _parents, _reg, _shift, _width) { \
+#define MUX(_id, _name, _parents, _reg, _shift, _width) \
+ MUX_FLAGS(_id, _name, _parents, _reg, \
+ _shift, _width, CLK_SET_RATE_PARENT)
+
+#define MUX_FLAGS(_id, _name, _parents, _reg, _shift, _width, _flags) { \
.id = _id, \
.name = _name, \
.mux_reg = _reg, \
@@ -121,7 +133,7 @@ struct mtk_composite {
.divider_shift = -1, \
.parent_names = _parents, \
.num_parents = ARRAY_SIZE(_parents), \
- .flags = CLK_SET_RATE_PARENT, \
+ .flags = _flags, \
}
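Since MUX() now expands to MUX_FLAGS() with CLK_SET_RATE_PARENT, existing tables keep their behaviour, and drivers can override the flags where needed (as mt6797 does for ddrphycfg_sel earlier in this series). For example, the mm_sel entry shown above could be written either way:

	/* These two mt6797 table entries are equivalent after this change. */
	MUX(CLK_TOP_MUX_MM, "mm_sel", mm_parents, 0x0040, 24, 2),
	MUX_FLAGS(CLK_TOP_MUX_MM, "mm_sel", mm_parents,
		  0x0040, 24, 2, CLK_SET_RATE_PARENT),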
#define DIV_GATE(_id, _name, _parent, _gate_reg, _gate_shift, _div_reg, \
@@ -158,6 +170,7 @@ struct mtk_gate {
const struct mtk_gate_regs *regs;
int shift;
const struct clk_ops *ops;
+ unsigned long flags;
};
int mtk_clk_register_gates(struct device_node *node,
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index efaa70f682b4..3858747f5438 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -1,27 +1,52 @@
-config COMMON_CLK_AMLOGIC
- bool
- depends on ARCH_MESON || COMPILE_TEST
- select COMMON_CLK_REGMAP_MESON
+config COMMON_CLK_MESON_INPUT
+ tristate
-config COMMON_CLK_AMLOGIC_AUDIO
- bool
- depends on ARCH_MESON || COMPILE_TEST
- select COMMON_CLK_AMLOGIC
+config COMMON_CLK_MESON_REGMAP
+ tristate
+ select REGMAP
-config COMMON_CLK_MESON_AO
- bool
- depends on OF
- depends on ARCH_MESON || COMPILE_TEST
- select COMMON_CLK_REGMAP_MESON
+config COMMON_CLK_MESON_DUALDIV
+ tristate
+ select COMMON_CLK_MESON_REGMAP
+
+config COMMON_CLK_MESON_MPLL
+ tristate
+ select COMMON_CLK_MESON_REGMAP
+
+config COMMON_CLK_MESON_PHASE
+ tristate
+ select COMMON_CLK_MESON_REGMAP
+
+config COMMON_CLK_MESON_PLL
+ tristate
+ select COMMON_CLK_MESON_REGMAP
+
+config COMMON_CLK_MESON_SCLK_DIV
+ tristate
+ select COMMON_CLK_MESON_REGMAP
+
+config COMMON_CLK_MESON_VID_PLL_DIV
+ tristate
+ select COMMON_CLK_MESON_REGMAP
+
+config COMMON_CLK_MESON_AO_CLKC
+ tristate
+ select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_INPUT
select RESET_CONTROLLER
-config COMMON_CLK_REGMAP_MESON
- bool
- select REGMAP
+config COMMON_CLK_MESON_EE_CLKC
+ tristate
+ select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_INPUT
config COMMON_CLK_MESON8B
bool
- select COMMON_CLK_AMLOGIC
+ depends on ARCH_MESON
+ select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_MPLL
+ select COMMON_CLK_MESON_PLL
+ select MFD_SYSCON
select RESET_CONTROLLER
help
Support for the clock controller on AmLogic S802 (Meson8),
@@ -30,8 +55,14 @@ config COMMON_CLK_MESON8B
config COMMON_CLK_GXBB
bool
- select COMMON_CLK_AMLOGIC
- select COMMON_CLK_MESON_AO
+ depends on ARCH_MESON
+ select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_DUALDIV
+ select COMMON_CLK_MESON_VID_PLL_DIV
+ select COMMON_CLK_MESON_MPLL
+ select COMMON_CLK_MESON_PLL
+ select COMMON_CLK_MESON_AO_CLKC
+ select COMMON_CLK_MESON_EE_CLKC
select MFD_SYSCON
help
Support for the clock controller on AmLogic S905 devices, aka gxbb.
@@ -39,8 +70,13 @@ config COMMON_CLK_GXBB
config COMMON_CLK_AXG
bool
- select COMMON_CLK_AMLOGIC
- select COMMON_CLK_MESON_AO
+ depends on ARCH_MESON
+ select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_DUALDIV
+ select COMMON_CLK_MESON_MPLL
+ select COMMON_CLK_MESON_PLL
+ select COMMON_CLK_MESON_AO_CLKC
+ select COMMON_CLK_MESON_EE_CLKC
select MFD_SYSCON
help
Support for the clock controller on AmLogic A113D devices, aka axg.
@@ -48,9 +84,26 @@ config COMMON_CLK_AXG
config COMMON_CLK_AXG_AUDIO
tristate "Meson AXG Audio Clock Controller Driver"
- depends on COMMON_CLK_AXG
- select COMMON_CLK_AMLOGIC_AUDIO
- select MFD_SYSCON
+ depends on ARCH_MESON
+ select COMMON_CLK_MESON_INPUT
+ select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_PHASE
+ select COMMON_CLK_MESON_SCLK_DIV
+ select REGMAP_MMIO
help
Support for the audio clock controller on AmLogic A113D devices,
aka axg. Say Y if you want the audio subsystem to work.
+
+config COMMON_CLK_G12A
+ bool
+ depends on ARCH_MESON
+ select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_DUALDIV
+ select COMMON_CLK_MESON_MPLL
+ select COMMON_CLK_MESON_PLL
+ select COMMON_CLK_MESON_AO_CLKC
+ select COMMON_CLK_MESON_EE_CLKC
+ select MFD_SYSCON
+ help
+ Support for the clock controller on Amlogic S905D2, S905X2 and S905Y2
+ devices, aka g12a. Say Y if you want peripherals to work.
diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile
index a849aa809825..021fc290e749 100644
--- a/drivers/clk/meson/Makefile
+++ b/drivers/clk/meson/Makefile
@@ -1,13 +1,20 @@
-#
-# Makefile for Meson specific clk
-#
+# Amlogic clock drivers
-obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-mpll.o clk-phase.o vid-pll-div.o
-obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-input.o
-obj-$(CONFIG_COMMON_CLK_AMLOGIC_AUDIO) += clk-triphase.o sclk-div.o
-obj-$(CONFIG_COMMON_CLK_MESON_AO) += meson-aoclk.o
+obj-$(CONFIG_COMMON_CLK_MESON_AO_CLKC) += meson-aoclk.o
+obj-$(CONFIG_COMMON_CLK_MESON_DUALDIV) += clk-dualdiv.o
+obj-$(CONFIG_COMMON_CLK_MESON_EE_CLKC) += meson-eeclk.o
+obj-$(CONFIG_COMMON_CLK_MESON_INPUT) += clk-input.o
+obj-$(CONFIG_COMMON_CLK_MESON_MPLL) += clk-mpll.o
+obj-$(CONFIG_COMMON_CLK_MESON_PHASE) += clk-phase.o
+obj-$(CONFIG_COMMON_CLK_MESON_PLL) += clk-pll.o
+obj-$(CONFIG_COMMON_CLK_MESON_REGMAP) += clk-regmap.o
+obj-$(CONFIG_COMMON_CLK_MESON_SCLK_DIV) += sclk-div.o
+obj-$(CONFIG_COMMON_CLK_MESON_VID_PLL_DIV) += vid-pll-div.o
+
+# Amlogic clock controllers
+
+obj-$(CONFIG_COMMON_CLK_AXG) += axg.o axg-aoclk.o
+obj-$(CONFIG_COMMON_CLK_AXG_AUDIO) += axg-audio.o
+obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o
+obj-$(CONFIG_COMMON_CLK_G12A) += g12a.o g12a-aoclk.o
obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o
-obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o gxbb-aoclk-32k.o
-obj-$(CONFIG_COMMON_CLK_AXG) += axg.o axg-aoclk.o
-obj-$(CONFIG_COMMON_CLK_AXG_AUDIO) += axg-audio.o
-obj-$(CONFIG_COMMON_CLK_REGMAP_MESON) += clk-regmap.o
diff --git a/drivers/clk/meson/axg-aoclk.c b/drivers/clk/meson/axg-aoclk.c
index 29e088542387..0086f31288eb 100644
--- a/drivers/clk/meson/axg-aoclk.c
+++ b/drivers/clk/meson/axg-aoclk.c
@@ -12,10 +12,27 @@
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/mfd/syscon.h>
-#include "clk-regmap.h"
#include "meson-aoclk.h"
#include "axg-aoclk.h"
+#include "clk-regmap.h"
+#include "clk-dualdiv.h"
+
+#define IN_PREFIX "ao-in-"
+
+/*
+ * AO Configuration Clock registers offsets
+ * Register offsets from the data sheet must be multiplied by 4.
+ */
+#define AO_RTI_PWR_CNTL_REG1 0x0C
+#define AO_RTI_PWR_CNTL_REG0 0x10
+#define AO_RTI_GEN_CNTL_REG0 0x40
+#define AO_OSCIN_CNTL 0x58
+#define AO_CRT_CLK_CNTL1 0x68
+#define AO_SAR_CLK 0x90
+#define AO_RTC_ALT_CLK_CNTL0 0x94
+#define AO_RTC_ALT_CLK_CNTL1 0x98
+
#define AXG_AO_GATE(_name, _bit) \
static struct clk_regmap axg_aoclk_##_name = { \
.data = &(struct clk_regmap_gate_data) { \
@@ -25,7 +42,7 @@ static struct clk_regmap axg_aoclk_##_name = { \
.hw.init = &(struct clk_init_data) { \
.name = "axg_ao_" #_name, \
.ops = &clk_regmap_gate_ops, \
- .parent_names = (const char *[]){ "clk81" }, \
+ .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk" }, \
.num_parents = 1, \
.flags = CLK_IGNORE_UNUSED, \
}, \
@@ -39,17 +56,141 @@ AXG_AO_GATE(uart2, 5);
AXG_AO_GATE(ir_blaster, 6);
AXG_AO_GATE(saradc, 7);
+static struct clk_regmap axg_aoclk_cts_oscin = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AO_RTI_PWR_CNTL_REG0,
+ .bit_idx = 14,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_oscin",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap axg_aoclk_32k_pre = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AO_RTC_ALT_CLK_CNTL0,
+ .bit_idx = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "axg_ao_32k_pre",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "cts_oscin" },
+ .num_parents = 1,
+ },
+};
+
+static const struct meson_clk_dualdiv_param axg_32k_div_table[] = {
+ {
+ .dual = 1,
+ .n1 = 733,
+ .m1 = 8,
+ .n2 = 732,
+ .m2 = 11,
+ }, {}
+};
+
+static struct clk_regmap axg_aoclk_32k_div = {
+ .data = &(struct meson_clk_dualdiv_data){
+ .n1 = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL0,
+ .shift = 0,
+ .width = 12,
+ },
+ .n2 = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL0,
+ .shift = 12,
+ .width = 12,
+ },
+ .m1 = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL1,
+ .shift = 0,
+ .width = 12,
+ },
+ .m2 = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL1,
+ .shift = 12,
+ .width = 12,
+ },
+ .dual = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .table = axg_32k_div_table,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "axg_ao_32k_div",
+ .ops = &meson_clk_dualdiv_ops,
+ .parent_names = (const char *[]){ "axg_ao_32k_pre" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap axg_aoclk_32k_sel = {
+ .data = &(struct clk_regmap_mux_data) {
+ .offset = AO_RTC_ALT_CLK_CNTL1,
+ .mask = 0x1,
+ .shift = 24,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "axg_ao_32k_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "axg_ao_32k_div",
+ "axg_ao_32k_pre" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_aoclk_32k = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AO_RTC_ALT_CLK_CNTL0,
+ .bit_idx = 30,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "axg_ao_32k",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "axg_ao_32k_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_aoclk_cts_rtc_oscin = {
+ .data = &(struct clk_regmap_mux_data) {
+ .offset = AO_RTI_PWR_CNTL_REG0,
+ .mask = 0x1,
+ .shift = 10,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "axg_ao_cts_rtc_oscin",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "axg_ao_32k",
+ IN_PREFIX "ext_32k-0" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static struct clk_regmap axg_aoclk_clk81 = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x1,
.shift = 8,
+ .flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "axg_ao_clk81",
.ops = &clk_regmap_mux_ro_ops,
- .parent_names = (const char *[]){ "clk81", "ao_alt_xtal"},
+ .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk",
+ "axg_ao_cts_rtc_oscin"},
.num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -62,7 +203,8 @@ static struct clk_regmap axg_aoclk_saradc_mux = {
.hw.init = &(struct clk_init_data){
.name = "axg_ao_saradc_mux",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ "xtal", "axg_ao_clk81" },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal",
+ "axg_ao_clk81" },
.num_parents = 2,
},
};
@@ -106,17 +248,23 @@ static const unsigned int axg_aoclk_reset[] = {
};
static struct clk_regmap *axg_aoclk_regmap[] = {
- [CLKID_AO_REMOTE] = &axg_aoclk_remote,
- [CLKID_AO_I2C_MASTER] = &axg_aoclk_i2c_master,
- [CLKID_AO_I2C_SLAVE] = &axg_aoclk_i2c_slave,
- [CLKID_AO_UART1] = &axg_aoclk_uart1,
- [CLKID_AO_UART2] = &axg_aoclk_uart2,
- [CLKID_AO_IR_BLASTER] = &axg_aoclk_ir_blaster,
- [CLKID_AO_SAR_ADC] = &axg_aoclk_saradc,
- [CLKID_AO_CLK81] = &axg_aoclk_clk81,
- [CLKID_AO_SAR_ADC_SEL] = &axg_aoclk_saradc_mux,
- [CLKID_AO_SAR_ADC_DIV] = &axg_aoclk_saradc_div,
- [CLKID_AO_SAR_ADC_CLK] = &axg_aoclk_saradc_gate,
+ &axg_aoclk_remote,
+ &axg_aoclk_i2c_master,
+ &axg_aoclk_i2c_slave,
+ &axg_aoclk_uart1,
+ &axg_aoclk_uart2,
+ &axg_aoclk_ir_blaster,
+ &axg_aoclk_saradc,
+ &axg_aoclk_cts_oscin,
+ &axg_aoclk_32k_pre,
+ &axg_aoclk_32k_div,
+ &axg_aoclk_32k_sel,
+ &axg_aoclk_32k,
+ &axg_aoclk_cts_rtc_oscin,
+ &axg_aoclk_clk81,
+ &axg_aoclk_saradc_mux,
+ &axg_aoclk_saradc_div,
+ &axg_aoclk_saradc_gate,
};
static const struct clk_hw_onecell_data axg_aoclk_onecell_data = {
@@ -132,10 +280,22 @@ static const struct clk_hw_onecell_data axg_aoclk_onecell_data = {
[CLKID_AO_SAR_ADC_SEL] = &axg_aoclk_saradc_mux.hw,
[CLKID_AO_SAR_ADC_DIV] = &axg_aoclk_saradc_div.hw,
[CLKID_AO_SAR_ADC_CLK] = &axg_aoclk_saradc_gate.hw,
+ [CLKID_AO_CTS_OSCIN] = &axg_aoclk_cts_oscin.hw,
+ [CLKID_AO_32K_PRE] = &axg_aoclk_32k_pre.hw,
+ [CLKID_AO_32K_DIV] = &axg_aoclk_32k_div.hw,
+ [CLKID_AO_32K_SEL] = &axg_aoclk_32k_sel.hw,
+ [CLKID_AO_32K] = &axg_aoclk_32k.hw,
+ [CLKID_AO_CTS_RTC_OSCIN] = &axg_aoclk_cts_rtc_oscin.hw,
},
.num = NR_CLKS,
};
+static const struct meson_aoclk_input axg_aoclk_inputs[] = {
+ { .name = "xtal", .required = true },
+ { .name = "mpeg-clk", .required = true },
+ { .name = "ext-32k-0", .required = false },
+};
+
static const struct meson_aoclk_data axg_aoclkc_data = {
.reset_reg = AO_RTI_GEN_CNTL_REG0,
.num_reset = ARRAY_SIZE(axg_aoclk_reset),
@@ -143,6 +303,9 @@ static const struct meson_aoclk_data axg_aoclkc_data = {
.num_clks = ARRAY_SIZE(axg_aoclk_regmap),
.clks = axg_aoclk_regmap,
.hw_data = &axg_aoclk_onecell_data,
+ .inputs = axg_aoclk_inputs,
+ .num_inputs = ARRAY_SIZE(axg_aoclk_inputs),
+ .input_prefix = IN_PREFIX,
};
static const struct of_device_id axg_aoclkc_match_table[] = {
diff --git a/drivers/clk/meson/axg-aoclk.h b/drivers/clk/meson/axg-aoclk.h
index 91384d8dd844..3cc27e85170f 100644
--- a/drivers/clk/meson/axg-aoclk.h
+++ b/drivers/clk/meson/axg-aoclk.h
@@ -10,18 +10,7 @@
#ifndef __AXG_AOCLKC_H
#define __AXG_AOCLKC_H
-#define NR_CLKS 11
-/* AO Configuration Clock registers offsets
- * Register offsets from the data sheet must be multiplied by 4.
- */
-#define AO_RTI_PWR_CNTL_REG1 0x0C
-#define AO_RTI_PWR_CNTL_REG0 0x10
-#define AO_RTI_GEN_CNTL_REG0 0x40
-#define AO_OSCIN_CNTL 0x58
-#define AO_CRT_CLK_CNTL1 0x68
-#define AO_SAR_CLK 0x90
-#define AO_RTC_ALT_CLK_CNTL0 0x94
-#define AO_RTC_ALT_CLK_CNTL1 0x98
+#define NR_CLKS 17
#include <dt-bindings/clock/axg-aoclkc.h>
#include <dt-bindings/reset/axg-aoclkc.h>
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
index 8ac3a2295473..7ab200b6c3bf 100644
--- a/drivers/clk/meson/axg-audio.c
+++ b/drivers/clk/meson/axg-audio.c
@@ -14,8 +14,11 @@
#include <linux/reset.h>
#include <linux/slab.h>
-#include "clkc-audio.h"
#include "axg-audio.h"
+#include "clk-input.h"
+#include "clk-regmap.h"
+#include "clk-phase.h"
+#include "sclk-div.h"
#define AXG_MST_IN_COUNT 8
#define AXG_SLV_SCLK_COUNT 10
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index 792735d7e46e..7a8ef80e5f2c 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -9,16 +9,17 @@
* Author: Qiufang Dai <qiufang.dai@amlogic.com>
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/init.h>
#include <linux/of_device.h>
-#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include "clkc.h"
+#include "clk-input.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-mpll.h"
#include "axg.h"
+#include "meson-eeclk.h"
static DEFINE_SPINLOCK(meson_clk_lock);
@@ -58,7 +59,7 @@ static struct clk_regmap axg_fixed_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "fixed_pll_dco",
.ops = &meson_clk_pll_ro_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
.num_parents = 1,
},
};
@@ -113,7 +114,7 @@ static struct clk_regmap axg_sys_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "sys_pll_dco",
.ops = &meson_clk_pll_ro_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
.num_parents = 1,
},
};
@@ -214,7 +215,7 @@ static struct clk_regmap axg_gp0_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "gp0_pll_dco",
.ops = &meson_clk_pll_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
.num_parents = 1,
},
};
@@ -283,7 +284,7 @@ static struct clk_regmap axg_hifi_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "hifi_pll_dco",
.ops = &meson_clk_pll_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
.num_parents = 1,
},
};
@@ -701,7 +702,7 @@ static struct clk_regmap axg_pcie_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "pcie_pll_dco",
.ops = &meson_clk_pll_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
.num_parents = 1,
},
};
@@ -803,7 +804,7 @@ static struct clk_regmap axg_pcie_cml_en1 = {
static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
static const char * const clk81_parent_names[] = {
- "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
+ IN_PREFIX "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
"fclk_div3", "fclk_div5"
};
@@ -852,7 +853,7 @@ static struct clk_regmap axg_clk81 = {
};
static const char * const axg_sd_emmc_clk0_parent_names[] = {
- "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7",
+ IN_PREFIX "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7",
/*
* Following these parent clocks, we should also have had mpll2, mpll3
@@ -957,7 +958,7 @@ static struct clk_regmap axg_sd_emmc_c_clk0 = {
static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
9, 10, 11, 13, 14, };
static const char * const gen_clk_parent_names[] = {
- "xtal", "hifi_pll", "mpll0", "mpll1", "mpll2", "mpll3",
+ IN_PREFIX "xtal", "hifi_pll", "mpll0", "mpll1", "mpll2", "mpll3",
"fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "gp0_pll",
};
@@ -1255,46 +1256,20 @@ static struct clk_regmap *const axg_clk_regmaps[] = {
&axg_pcie_pll_od,
};
+static const struct meson_eeclkc_data axg_clkc_data = {
+ .regmap_clks = axg_clk_regmaps,
+ .regmap_clk_num = ARRAY_SIZE(axg_clk_regmaps),
+ .hw_onecell_data = &axg_hw_onecell_data,
+};
+
+
static const struct of_device_id clkc_match_table[] = {
- { .compatible = "amlogic,axg-clkc" },
+ { .compatible = "amlogic,axg-clkc", .data = &axg_clkc_data },
{}
};
-static int axg_clkc_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct regmap *map;
- int ret, i;
-
- /* Get the hhi system controller node if available */
- map = syscon_node_to_regmap(of_get_parent(dev->of_node));
- if (IS_ERR(map)) {
- dev_err(dev, "failed to get HHI regmap\n");
- return PTR_ERR(map);
- }
-
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(axg_clk_regmaps); i++)
- axg_clk_regmaps[i]->map = map;
-
- for (i = 0; i < axg_hw_onecell_data.num; i++) {
- /* array might be sparse */
- if (!axg_hw_onecell_data.hws[i])
- continue;
-
- ret = devm_clk_hw_register(dev, axg_hw_onecell_data.hws[i]);
- if (ret) {
- dev_err(dev, "Clock registration failed\n");
- return ret;
- }
- }
-
- return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
- &axg_hw_onecell_data);
-}
-
static struct platform_driver axg_driver = {
- .probe = axg_clkc_probe,
+ .probe = meson_eeclkc_probe,
.driver = {
.name = "axg-clkc",
.of_match_table = clkc_match_table,
diff --git a/drivers/clk/meson/clk-dualdiv.c b/drivers/clk/meson/clk-dualdiv.c
new file mode 100644
index 000000000000..c5ca23a5e3e8
--- /dev/null
+++ b/drivers/clk/meson/clk-dualdiv.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+/*
+ * The AO Domain embeds a dual divider to generate a more precise
+ * 32.768 kHz clock for low-power suspend mode and CEC.
+ * ______ ______
+ * | | | |
+ * | Div1 |-| Cnt1 |
+ * /|______| |______|\
+ * -| ______ ______ X--> Out
+ * \| | | |/
+ * | Div2 |-| Cnt2 |
+ * |______| |______|
+ *
+ * The divider can be switched between single and dual mode; a counter
+ * for each divider determines when the switchover happens.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+
+#include "clk-regmap.h"
+#include "clk-dualdiv.h"
+
+static inline struct meson_clk_dualdiv_data *
+meson_clk_dualdiv_data(struct clk_regmap *clk)
+{
+ return (struct meson_clk_dualdiv_data *)clk->data;
+}
+
+static unsigned long
+__dualdiv_param_to_rate(unsigned long parent_rate,
+ const struct meson_clk_dualdiv_param *p)
+{
+ if (!p->dual)
+ return DIV_ROUND_CLOSEST(parent_rate, p->n1);
+
+ return DIV_ROUND_CLOSEST(parent_rate * (p->m1 + p->m2),
+ p->n1 * p->m1 + p->n2 * p->m2);
+}
+
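A quick arithmetic check of the dual-mode formula above, using the axg_32k_div_table entry that appears earlier in this series (illustration only, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned long xtal = 24000000UL;			/* AO domain xtal */
	unsigned long n1 = 733, m1 = 8, n2 = 732, m2 = 11;	/* dual = 1 entry */

	/* rate = parent * (m1 + m2) / (n1 * m1 + n2 * m2) */
	printf("%lu Hz\n", xtal * (m1 + m2) / (n1 * m1 + n2 * m2));	/* 32768 Hz */
	return 0;
}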
+static unsigned long meson_clk_dualdiv_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_dualdiv_data *dualdiv = meson_clk_dualdiv_data(clk);
+ struct meson_clk_dualdiv_param setting;
+
+ setting.dual = meson_parm_read(clk->map, &dualdiv->dual);
+ setting.n1 = meson_parm_read(clk->map, &dualdiv->n1) + 1;
+ setting.m1 = meson_parm_read(clk->map, &dualdiv->m1) + 1;
+ setting.n2 = meson_parm_read(clk->map, &dualdiv->n2) + 1;
+ setting.m2 = meson_parm_read(clk->map, &dualdiv->m2) + 1;
+
+ return __dualdiv_param_to_rate(parent_rate, &setting);
+}
+
+static const struct meson_clk_dualdiv_param *
+__dualdiv_get_setting(unsigned long rate, unsigned long parent_rate,
+ struct meson_clk_dualdiv_data *dualdiv)
+{
+ const struct meson_clk_dualdiv_param *table = dualdiv->table;
+ unsigned long best = 0, now = 0;
+ unsigned int i, best_i = 0;
+
+ if (!table)
+ return NULL;
+
+ for (i = 0; table[i].n1; i++) {
+ now = __dualdiv_param_to_rate(parent_rate, &table[i]);
+
+ /* If we get an exact match, don't bother any further */
+ if (now == rate) {
+ return &table[i];
+ } else if (abs(now - rate) < abs(best - rate)) {
+ best = now;
+ best_i = i;
+ }
+ }
+
+ return (struct meson_clk_dualdiv_param *)&table[best_i];
+}
+
+static long meson_clk_dualdiv_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_dualdiv_data *dualdiv = meson_clk_dualdiv_data(clk);
+ const struct meson_clk_dualdiv_param *setting =
+ __dualdiv_get_setting(rate, *parent_rate, dualdiv);
+
+ if (!setting)
+ return meson_clk_dualdiv_recalc_rate(hw, *parent_rate);
+
+ return __dualdiv_param_to_rate(*parent_rate, setting);
+}
+
+static int meson_clk_dualdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_dualdiv_data *dualdiv = meson_clk_dualdiv_data(clk);
+ const struct meson_clk_dualdiv_param *setting =
+ __dualdiv_get_setting(rate, parent_rate, dualdiv);
+
+ if (!setting)
+ return -EINVAL;
+
+ meson_parm_write(clk->map, &dualdiv->dual, setting->dual);
+ meson_parm_write(clk->map, &dualdiv->n1, setting->n1 - 1);
+ meson_parm_write(clk->map, &dualdiv->m1, setting->m1 - 1);
+ meson_parm_write(clk->map, &dualdiv->n2, setting->n2 - 1);
+ meson_parm_write(clk->map, &dualdiv->m2, setting->m2 - 1);
+
+ return 0;
+}
+
+const struct clk_ops meson_clk_dualdiv_ops = {
+ .recalc_rate = meson_clk_dualdiv_recalc_rate,
+ .round_rate = meson_clk_dualdiv_round_rate,
+ .set_rate = meson_clk_dualdiv_set_rate,
+};
+EXPORT_SYMBOL_GPL(meson_clk_dualdiv_ops);
+
+const struct clk_ops meson_clk_dualdiv_ro_ops = {
+ .recalc_rate = meson_clk_dualdiv_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(meson_clk_dualdiv_ro_ops);
+
+MODULE_DESCRIPTION("Amlogic dual divider driver");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/clk-dualdiv.h b/drivers/clk/meson/clk-dualdiv.h
new file mode 100644
index 000000000000..4aa939018012
--- /dev/null
+++ b/drivers/clk/meson/clk-dualdiv.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef __MESON_CLK_DUALDIV_H
+#define __MESON_CLK_DUALDIV_H
+
+#include <linux/clk-provider.h>
+#include "parm.h"
+
+struct meson_clk_dualdiv_param {
+ unsigned int n1;
+ unsigned int n2;
+ unsigned int m1;
+ unsigned int m2;
+ unsigned int dual;
+};
+
+struct meson_clk_dualdiv_data {
+ struct parm n1;
+ struct parm n2;
+ struct parm m1;
+ struct parm m2;
+ struct parm dual;
+ const struct meson_clk_dualdiv_param *table;
+};
+
+extern const struct clk_ops meson_clk_dualdiv_ops;
+extern const struct clk_ops meson_clk_dualdiv_ro_ops;
+
+#endif /* __MESON_CLK_DUALDIV_H */
diff --git a/drivers/clk/meson/clk-input.c b/drivers/clk/meson/clk-input.c
index 06b3e3bb6a66..086226e9dba6 100644
--- a/drivers/clk/meson/clk-input.c
+++ b/drivers/clk/meson/clk-input.c
@@ -7,7 +7,8 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
-#include "clkc.h"
+#include <linux/module.h>
+#include "clk-input.h"
static const struct clk_ops meson_clk_no_ops = {};
@@ -42,3 +43,7 @@ struct clk_hw *meson_clk_hw_register_input(struct device *dev,
return ret ? ERR_PTR(ret) : hw;
}
EXPORT_SYMBOL_GPL(meson_clk_hw_register_input);
+
+MODULE_DESCRIPTION("Amlogic clock input helper");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/clk-input.h b/drivers/clk/meson/clk-input.h
new file mode 100644
index 000000000000..4a541b9685a6
--- /dev/null
+++ b/drivers/clk/meson/clk-input.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef __MESON_CLK_INPUT_H
+#define __MESON_CLK_INPUT_H
+
+#include <linux/clk-provider.h>
+
+struct device;
+
+struct clk_hw *meson_clk_hw_register_input(struct device *dev,
+ const char *of_name,
+ const char *clk_name,
+ unsigned long flags);
+
+#endif /* __MESON_CLK_INPUT_H */
diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c
index 650f75cc15a9..f76850d99e59 100644
--- a/drivers/clk/meson/clk-mpll.c
+++ b/drivers/clk/meson/clk-mpll.c
@@ -12,7 +12,11 @@
*/
#include <linux/clk-provider.h>
-#include "clkc.h"
+#include <linux/module.h>
+#include <linux/spinlock.h>
+
+#include "clk-regmap.h"
+#include "clk-mpll.h"
#define SDM_DEN 16384
#define N2_MIN 4
@@ -138,9 +142,15 @@ const struct clk_ops meson_clk_mpll_ro_ops = {
.recalc_rate = mpll_recalc_rate,
.round_rate = mpll_round_rate,
};
+EXPORT_SYMBOL_GPL(meson_clk_mpll_ro_ops);
const struct clk_ops meson_clk_mpll_ops = {
.recalc_rate = mpll_recalc_rate,
.round_rate = mpll_round_rate,
.set_rate = mpll_set_rate,
};
+EXPORT_SYMBOL_GPL(meson_clk_mpll_ops);
+
+MODULE_DESCRIPTION("Amlogic MPLL driver");
+MODULE_AUTHOR("Michael Turquette <mturquette@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/clk-mpll.h b/drivers/clk/meson/clk-mpll.h
new file mode 100644
index 000000000000..cf79340006dd
--- /dev/null
+++ b/drivers/clk/meson/clk-mpll.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef __MESON_CLK_MPLL_H
+#define __MESON_CLK_MPLL_H
+
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+
+#include "parm.h"
+
+struct meson_clk_mpll_data {
+ struct parm sdm;
+ struct parm sdm_en;
+ struct parm n2;
+ struct parm ssen;
+ struct parm misc;
+ spinlock_t *lock;
+ u8 flags;
+};
+
+#define CLK_MESON_MPLL_ROUND_CLOSEST BIT(0)
+
+extern const struct clk_ops meson_clk_mpll_ro_ops;
+extern const struct clk_ops meson_clk_mpll_ops;
+
+#endif /* __MESON_CLK_MPLL_H */
diff --git a/drivers/clk/meson/clk-phase.c b/drivers/clk/meson/clk-phase.c
index cba43748ce3d..80c3ada193a4 100644
--- a/drivers/clk/meson/clk-phase.c
+++ b/drivers/clk/meson/clk-phase.c
@@ -5,7 +5,10 @@
*/
#include <linux/clk-provider.h>
-#include "clkc.h"
+#include <linux/module.h>
+
+#include "clk-regmap.h"
+#include "clk-phase.h"
#define phase_step(_width) (360 / (1 << (_width)))
@@ -15,13 +18,12 @@ meson_clk_phase_data(struct clk_regmap *clk)
return (struct meson_clk_phase_data *)clk->data;
}
-int meson_clk_degrees_from_val(unsigned int val, unsigned int width)
+static int meson_clk_degrees_from_val(unsigned int val, unsigned int width)
{
return phase_step(width) * val;
}
-EXPORT_SYMBOL_GPL(meson_clk_degrees_from_val);
-unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width)
+static unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width)
{
unsigned int val = DIV_ROUND_CLOSEST(degrees, phase_step(width));
@@ -31,7 +33,6 @@ unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width)
*/
return val % (1 << width);
}
-EXPORT_SYMBOL_GPL(meson_clk_degrees_to_val);
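For reference, the conversion above works in steps of 360 / 2^width degrees and wraps modulo 2^width. A small illustration with an arbitrarily chosen field width:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))
#define PHASE_STEP(width)	(360 / (1 << (width)))

int main(void)
{
	int width = 3;	/* 3-bit phase field -> 45 degree steps */

	/* degrees -> register value, wrapping as meson_clk_degrees_to_val() does */
	printf("%d\n", DIV_ROUND_CLOSEST(90, PHASE_STEP(width)) % (1 << width));	/* 2 */
	/* register value -> degrees, as meson_clk_degrees_from_val() does */
	printf("%d\n", PHASE_STEP(width) * 2);						/* 90 */
	return 0;
}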
static int meson_clk_phase_get_phase(struct clk_hw *hw)
{
@@ -61,3 +62,67 @@ const struct clk_ops meson_clk_phase_ops = {
.set_phase = meson_clk_phase_set_phase,
};
EXPORT_SYMBOL_GPL(meson_clk_phase_ops);
+
+/*
+ * This is a special clock for the audio controller.
+ * The phase of mst_sclk clock output can be controlled independently
+ * for the outside world (ph0), the tdmout (ph1) and tdmin (ph2).
+ * Controlling these 3 phases as just one makes things simpler and
+ * gives the same clock view to all the elements on the i2s bus.
+ * If necessary, we can still control the phase in the tdm block,
+ * which makes this independent control redundant.
+ */
+static inline struct meson_clk_triphase_data *
+meson_clk_triphase_data(struct clk_regmap *clk)
+{
+ return (struct meson_clk_triphase_data *)clk->data;
+}
+
+static void meson_clk_triphase_sync(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
+ unsigned int val;
+
+ /* Get phase 0 and sync it to phase 1 and 2 */
+ val = meson_parm_read(clk->map, &tph->ph0);
+ meson_parm_write(clk->map, &tph->ph1, val);
+ meson_parm_write(clk->map, &tph->ph2, val);
+}
+
+static int meson_clk_triphase_get_phase(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
+ unsigned int val;
+
+ /* Phases are in sync, reading phase 0 is enough */
+ val = meson_parm_read(clk->map, &tph->ph0);
+
+ return meson_clk_degrees_from_val(val, tph->ph0.width);
+}
+
+static int meson_clk_triphase_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
+ unsigned int val;
+
+ val = meson_clk_degrees_to_val(degrees, tph->ph0.width);
+ meson_parm_write(clk->map, &tph->ph0, val);
+ meson_parm_write(clk->map, &tph->ph1, val);
+ meson_parm_write(clk->map, &tph->ph2, val);
+
+ return 0;
+}
+
+const struct clk_ops meson_clk_triphase_ops = {
+ .init = meson_clk_triphase_sync,
+ .get_phase = meson_clk_triphase_get_phase,
+ .set_phase = meson_clk_triphase_set_phase,
+};
+EXPORT_SYMBOL_GPL(meson_clk_triphase_ops);
+
+MODULE_DESCRIPTION("Amlogic phase driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/clk-phase.h b/drivers/clk/meson/clk-phase.h
new file mode 100644
index 000000000000..5579f9ced142
--- /dev/null
+++ b/drivers/clk/meson/clk-phase.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef __MESON_CLK_PHASE_H
+#define __MESON_CLK_PHASE_H
+
+#include <linux/clk-provider.h>
+#include "parm.h"
+
+struct meson_clk_phase_data {
+ struct parm ph;
+};
+
+struct meson_clk_triphase_data {
+ struct parm ph0;
+ struct parm ph1;
+ struct parm ph2;
+};
+
+extern const struct clk_ops meson_clk_phase_ops;
+extern const struct clk_ops meson_clk_triphase_ops;
+
+#endif /* __MESON_CLK_PHASE_H */
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
index afffc1547e20..41e16dd7272a 100644
--- a/drivers/clk/meson/clk-pll.c
+++ b/drivers/clk/meson/clk-pll.c
@@ -32,11 +32,10 @@
#include <linux/io.h>
#include <linux/math64.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/slab.h>
-#include <linux/string.h>
+#include <linux/rational.h>
-#include "clkc.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
static inline struct meson_clk_pll_data *
meson_clk_pll_data(struct clk_regmap *clk)
@@ -44,12 +43,21 @@ meson_clk_pll_data(struct clk_regmap *clk)
return (struct meson_clk_pll_data *)clk->data;
}
+static int __pll_round_closest_mult(struct meson_clk_pll_data *pll)
+{
+ if ((pll->flags & CLK_MESON_PLL_ROUND_CLOSEST) &&
+ !MESON_PARM_APPLICABLE(&pll->frac))
+ return 1;
+
+ return 0;
+}
+
static unsigned long __pll_params_to_rate(unsigned long parent_rate,
- const struct pll_params_table *pllt,
- u16 frac,
+ unsigned int m, unsigned int n,
+ unsigned int frac,
struct meson_clk_pll_data *pll)
{
- u64 rate = (u64)parent_rate * pllt->m;
+ u64 rate = (u64)parent_rate * m;
if (frac && MESON_PARM_APPLICABLE(&pll->frac)) {
u64 frac_rate = (u64)parent_rate * frac;
@@ -58,7 +66,7 @@ static unsigned long __pll_params_to_rate(unsigned long parent_rate,
(1 << pll->frac.width));
}
- return DIV_ROUND_UP_ULL(rate, pllt->n);
+ return DIV_ROUND_UP_ULL(rate, n);
}
static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw,
@@ -66,35 +74,39 @@ static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw,
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
- struct pll_params_table pllt;
- u16 frac;
+ unsigned int m, n, frac;
- pllt.n = meson_parm_read(clk->map, &pll->n);
- pllt.m = meson_parm_read(clk->map, &pll->m);
+ n = meson_parm_read(clk->map, &pll->n);
+ m = meson_parm_read(clk->map, &pll->m);
frac = MESON_PARM_APPLICABLE(&pll->frac) ?
meson_parm_read(clk->map, &pll->frac) :
0;
- return __pll_params_to_rate(parent_rate, &pllt, frac, pll);
+ return __pll_params_to_rate(parent_rate, m, n, frac, pll);
}
-static u16 __pll_params_with_frac(unsigned long rate,
- unsigned long parent_rate,
- const struct pll_params_table *pllt,
- struct meson_clk_pll_data *pll)
+static unsigned int __pll_params_with_frac(unsigned long rate,
+ unsigned long parent_rate,
+ unsigned int m,
+ unsigned int n,
+ struct meson_clk_pll_data *pll)
{
- u16 frac_max = (1 << pll->frac.width);
- u64 val = (u64)rate * pllt->n;
+ unsigned int frac_max = (1 << pll->frac.width);
+ u64 val = (u64)rate * n;
+
+ /* Bail out if we are already over the requested rate */
+ if (rate < parent_rate * m / n)
+ return 0;
if (pll->flags & CLK_MESON_PLL_ROUND_CLOSEST)
val = DIV_ROUND_CLOSEST_ULL(val * frac_max, parent_rate);
else
val = div_u64(val * frac_max, parent_rate);
- val -= pllt->m * frac_max;
+ val -= m * frac_max;
- return min((u16)val, (u16)(frac_max - 1));
+ return min((unsigned int)val, (frac_max - 1));
}
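A worked example of the fractional computation above (numbers are illustrative; a 12-bit frac field is assumed):

#include <stdio.h>

int main(void)
{
	unsigned long long parent = 24000000ULL;	/* 24 MHz parent */
	unsigned long long rate = 1500000000ULL;	/* requested 1.5 GHz */
	unsigned long long m = 62, n = 1;		/* integer setting: 1488 MHz */
	unsigned long long frac_max = 1 << 12;		/* assumed 12-bit frac field */
	unsigned long long frac;

	/* frac covers the gap between rate and parent * m / n */
	frac = rate * n * frac_max / parent - m * frac_max;
	printf("frac = %llu\n", frac);			/* 2048 */

	/* and back to a rate, as __pll_params_to_rate() does */
	printf("rate = %llu Hz\n", (parent * m + parent * frac / frac_max) / n);
	return 0;					/* 1500000000 Hz */
}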
static bool meson_clk_pll_is_better(unsigned long rate,
@@ -102,45 +114,123 @@ static bool meson_clk_pll_is_better(unsigned long rate,
unsigned long now,
struct meson_clk_pll_data *pll)
{
- if (!(pll->flags & CLK_MESON_PLL_ROUND_CLOSEST) ||
- MESON_PARM_APPLICABLE(&pll->frac)) {
- /* Round down */
- if (now < rate && best < now)
- return true;
- } else {
+ if (__pll_round_closest_mult(pll)) {
/* Round Closest */
if (abs(now - rate) < abs(best - rate))
return true;
+ } else {
+ /* Round down */
+ if (now < rate && best < now)
+ return true;
}
return false;
}
-static const struct pll_params_table *
-meson_clk_get_pll_settings(unsigned long rate,
- unsigned long parent_rate,
- struct meson_clk_pll_data *pll)
+static int meson_clk_get_pll_table_index(unsigned int index,
+ unsigned int *m,
+ unsigned int *n,
+ struct meson_clk_pll_data *pll)
{
- const struct pll_params_table *table = pll->table;
- unsigned long best = 0, now = 0;
- unsigned int i, best_i = 0;
+ if (!pll->table[index].n)
+ return -EINVAL;
+
+ *m = pll->table[index].m;
+ *n = pll->table[index].n;
+
+ return 0;
+}
+
+static unsigned int meson_clk_get_pll_range_m(unsigned long rate,
+ unsigned long parent_rate,
+ unsigned int n,
+ struct meson_clk_pll_data *pll)
+{
+ u64 val = (u64)rate * n;
- if (!table)
- return NULL;
+ if (__pll_round_closest_mult(pll))
+ return DIV_ROUND_CLOSEST_ULL(val, parent_rate);
- for (i = 0; table[i].n; i++) {
- now = __pll_params_to_rate(parent_rate, &table[i], 0, pll);
+ return div_u64(val, parent_rate);
+}
- /* If we get an exact match, don't bother any further */
- if (now == rate) {
- return &table[i];
- } else if (meson_clk_pll_is_better(rate, best, now, pll)) {
+static int meson_clk_get_pll_range_index(unsigned long rate,
+ unsigned long parent_rate,
+ unsigned int index,
+ unsigned int *m,
+ unsigned int *n,
+ struct meson_clk_pll_data *pll)
+{
+ *n = index + 1;
+
+ /* Check the predivider range */
+ if (*n >= (1 << pll->n.width))
+ return -EINVAL;
+
+ if (*n == 1) {
+ /* Get the boundaries out of the way */
+ if (rate <= pll->range->min * parent_rate) {
+ *m = pll->range->min;
+ return -ENODATA;
+ } else if (rate >= pll->range->max * parent_rate) {
+ *m = pll->range->max;
+ return -ENODATA;
+ }
+ }
+
+ *m = meson_clk_get_pll_range_m(rate, parent_rate, *n, pll);
+
+ /* the pre-divider gives a multiplier that is too big - stop */
+ if (*m >= (1 << pll->m.width))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int meson_clk_get_pll_get_index(unsigned long rate,
+ unsigned long parent_rate,
+ unsigned int index,
+ unsigned int *m,
+ unsigned int *n,
+ struct meson_clk_pll_data *pll)
+{
+ if (pll->range)
+ return meson_clk_get_pll_range_index(rate, parent_rate,
+ index, m, n, pll);
+ else if (pll->table)
+ return meson_clk_get_pll_table_index(index, m, n, pll);
+
+ return -EINVAL;
+}
+
+static int meson_clk_get_pll_settings(unsigned long rate,
+ unsigned long parent_rate,
+ unsigned int *best_m,
+ unsigned int *best_n,
+ struct meson_clk_pll_data *pll)
+{
+ unsigned long best = 0, now = 0;
+ unsigned int i, m, n;
+ int ret;
+
+ for (i = 0, ret = 0; !ret; i++) {
+ ret = meson_clk_get_pll_get_index(rate, parent_rate,
+ i, &m, &n, pll);
+ if (ret == -EINVAL)
+ break;
+
+ now = __pll_params_to_rate(parent_rate, m, n, 0, pll);
+ if (meson_clk_pll_is_better(rate, best, now, pll)) {
best = now;
- best_i = i;
+ *best_m = m;
+ *best_n = n;
+
+ if (now == rate)
+ break;
}
}
- return (struct pll_params_table *)&table[best_i];
+ return best ? 0 : -EINVAL;
}
static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -148,15 +238,15 @@ static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
- const struct pll_params_table *pllt =
- meson_clk_get_pll_settings(rate, *parent_rate, pll);
+ unsigned int m, n, frac;
unsigned long round;
- u16 frac;
+ int ret;
- if (!pllt)
+ ret = meson_clk_get_pll_settings(rate, *parent_rate, &m, &n, pll);
+ if (ret)
return meson_clk_pll_recalc_rate(hw, *parent_rate);
- round = __pll_params_to_rate(*parent_rate, pllt, 0, pll);
+ round = __pll_params_to_rate(*parent_rate, m, n, 0, pll);
if (!MESON_PARM_APPLICABLE(&pll->frac) || rate == round)
return round;
@@ -165,9 +255,9 @@ static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
* The rate provided by the setting is not an exact match, let's
* try to improve the result using the fractional parameter
*/
- frac = __pll_params_with_frac(rate, *parent_rate, pllt, pll);
+ frac = __pll_params_with_frac(rate, *parent_rate, m, n, pll);
- return __pll_params_to_rate(*parent_rate, pllt, frac, pll);
+ return __pll_params_to_rate(*parent_rate, m, n, frac, pll);
}
static int meson_clk_pll_wait_lock(struct clk_hw *hw)
@@ -254,30 +344,27 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
- const struct pll_params_table *pllt;
- unsigned int enabled;
+ unsigned int enabled, m, n, frac = 0, ret;
unsigned long old_rate;
- u16 frac = 0;
if (parent_rate == 0 || rate == 0)
return -EINVAL;
old_rate = rate;
- pllt = meson_clk_get_pll_settings(rate, parent_rate, pll);
- if (!pllt)
- return -EINVAL;
+ ret = meson_clk_get_pll_settings(rate, parent_rate, &m, &n, pll);
+ if (ret)
+ return ret;
enabled = meson_parm_read(clk->map, &pll->en);
if (enabled)
meson_clk_pll_disable(hw);
- meson_parm_write(clk->map, &pll->n, pllt->n);
- meson_parm_write(clk->map, &pll->m, pllt->m);
-
+ meson_parm_write(clk->map, &pll->n, n);
+ meson_parm_write(clk->map, &pll->m, m);
if (MESON_PARM_APPLICABLE(&pll->frac)) {
- frac = __pll_params_with_frac(rate, parent_rate, pllt, pll);
+ frac = __pll_params_with_frac(rate, parent_rate, m, n, pll);
meson_parm_write(clk->map, &pll->frac, frac);
}
@@ -309,8 +396,15 @@ const struct clk_ops meson_clk_pll_ops = {
.enable = meson_clk_pll_enable,
.disable = meson_clk_pll_disable
};
+EXPORT_SYMBOL_GPL(meson_clk_pll_ops);
const struct clk_ops meson_clk_pll_ro_ops = {
.recalc_rate = meson_clk_pll_recalc_rate,
.is_enabled = meson_clk_pll_is_enabled,
};
+EXPORT_SYMBOL_GPL(meson_clk_pll_ro_ops);
+
+MODULE_DESCRIPTION("Amlogic PLL driver");
+MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/clk-pll.h b/drivers/clk/meson/clk-pll.h
new file mode 100644
index 000000000000..55af2e285b1b
--- /dev/null
+++ b/drivers/clk/meson/clk-pll.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef __MESON_CLK_PLL_H
+#define __MESON_CLK_PLL_H
+
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include "parm.h"
+
+struct pll_params_table {
+ unsigned int m;
+ unsigned int n;
+};
+
+struct pll_mult_range {
+ unsigned int min;
+ unsigned int max;
+};
+
+#define PLL_PARAMS(_m, _n) \
+ { \
+ .m = (_m), \
+ .n = (_n), \
+ }
+
+#define CLK_MESON_PLL_ROUND_CLOSEST BIT(0)
+
+struct meson_clk_pll_data {
+ struct parm en;
+ struct parm m;
+ struct parm n;
+ struct parm frac;
+ struct parm l;
+ struct parm rst;
+ const struct reg_sequence *init_regs;
+ unsigned int init_count;
+ const struct pll_params_table *table;
+ const struct pll_mult_range *range;
+ u8 flags;
+};
+
+extern const struct clk_ops meson_clk_pll_ro_ops;
+extern const struct clk_ops meson_clk_pll_ops;
+
+#endif /* __MESON_CLK_PLL_H */
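As a rough guide to these parameters (an assumption drawn from how m, n and frac are consumed by clk-pll.c above, not a datasheet formula): the DCO rate is parent_rate * m / n, with frac adding a correction smaller than one multiplier step. A minimal sketch of the integer part:

	/* illustrative only: integer part of the PLL DCO rate */
	static unsigned long pll_dco_rate(unsigned long parent_rate,
					  unsigned int m, unsigned int n)
	{
		return (unsigned long)(((u64)parent_rate * m) / n);
	}

For example, a 24MHz xtal with m = 250 and n = 3 would give a 2GHz DCO; the power-of-two post dividers declared alongside each PLL then produce the final output rate.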
diff --git a/drivers/clk/meson/clk-regmap.c b/drivers/clk/meson/clk-regmap.c
index c515f67322a3..dcd1757cc5df 100644
--- a/drivers/clk/meson/clk-regmap.c
+++ b/drivers/clk/meson/clk-regmap.c
@@ -4,6 +4,7 @@
* Author: Jerome Brunet <jbrunet@baylibre.com>
*/
+#include <linux/module.h>
#include "clk-regmap.h"
static int clk_regmap_gate_endisable(struct clk_hw *hw, int enable)
@@ -180,3 +181,7 @@ const struct clk_ops clk_regmap_mux_ro_ops = {
.get_parent = clk_regmap_mux_get_parent,
};
EXPORT_SYMBOL_GPL(clk_regmap_mux_ro_ops);
+
+MODULE_DESCRIPTION("Amlogic regmap backed clock driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/clk-regmap.h b/drivers/clk/meson/clk-regmap.h
index e9c5728d40eb..1dd0abe3ba91 100644
--- a/drivers/clk/meson/clk-regmap.h
+++ b/drivers/clk/meson/clk-regmap.h
@@ -111,4 +111,24 @@ clk_get_regmap_mux_data(struct clk_regmap *clk)
extern const struct clk_ops clk_regmap_mux_ops;
extern const struct clk_ops clk_regmap_mux_ro_ops;
+#define __MESON_GATE(_name, _reg, _bit, _ops) \
+struct clk_regmap _name = { \
+ .data = &(struct clk_regmap_gate_data){ \
+ .offset = (_reg), \
+ .bit_idx = (_bit), \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = #_name, \
+ .ops = _ops, \
+ .parent_names = (const char *[]){ "clk81" }, \
+ .num_parents = 1, \
+ .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
+ }, \
+}
+
+#define MESON_GATE(_name, _reg, _bit) \
+ __MESON_GATE(_name, _reg, _bit, &clk_regmap_gate_ops)
+
+#define MESON_GATE_RO(_name, _reg, _bit) \
+ __MESON_GATE(_name, _reg, _bit, &clk_regmap_gate_ro_ops)
#endif /* __CLK_REGMAP_H */
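A hedged usage example (the gate names and register offsets below are made up for illustration): MESON_GATE expands to a complete clk_regmap definition for a simple clk81-parented gate, while MESON_GATE_RO uses the read-only gate ops for bits Linux must not touch.

	/* hypothetical gate on bit 3 of register 0x140, parented to clk81 */
	static MESON_GATE(my_periph, 0x140, 3);

	/* same shape, but never written by Linux */
	static MESON_GATE_RO(my_secure_periph, 0x144, 5);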
diff --git a/drivers/clk/meson/clk-triphase.c b/drivers/clk/meson/clk-triphase.c
deleted file mode 100644
index 4a59936251e5..000000000000
--- a/drivers/clk/meson/clk-triphase.c
+++ /dev/null
@@ -1,68 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
-/*
- * Copyright (c) 2018 BayLibre, SAS.
- * Author: Jerome Brunet <jbrunet@baylibre.com>
- */
-
-#include <linux/clk-provider.h>
-#include "clkc-audio.h"
-
-/*
- * This is a special clock for the audio controller.
- * The phase of mst_sclk clock output can be controlled independently
- * for the outside world (ph0), the tdmout (ph1) and tdmin (ph2).
- * Controlling these 3 phases as just one makes things simpler and
- * give the same clock view to all the element on the i2s bus.
- * If necessary, we can still control the phase in the tdm block
- * which makes these independent control redundant.
- */
-static inline struct meson_clk_triphase_data *
-meson_clk_triphase_data(struct clk_regmap *clk)
-{
- return (struct meson_clk_triphase_data *)clk->data;
-}
-
-static void meson_clk_triphase_sync(struct clk_hw *hw)
-{
- struct clk_regmap *clk = to_clk_regmap(hw);
- struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
- unsigned int val;
-
- /* Get phase 0 and sync it to phase 1 and 2 */
- val = meson_parm_read(clk->map, &tph->ph0);
- meson_parm_write(clk->map, &tph->ph1, val);
- meson_parm_write(clk->map, &tph->ph2, val);
-}
-
-static int meson_clk_triphase_get_phase(struct clk_hw *hw)
-{
- struct clk_regmap *clk = to_clk_regmap(hw);
- struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
- unsigned int val;
-
- /* Phase are in sync, reading phase 0 is enough */
- val = meson_parm_read(clk->map, &tph->ph0);
-
- return meson_clk_degrees_from_val(val, tph->ph0.width);
-}
-
-static int meson_clk_triphase_set_phase(struct clk_hw *hw, int degrees)
-{
- struct clk_regmap *clk = to_clk_regmap(hw);
- struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
- unsigned int val;
-
- val = meson_clk_degrees_to_val(degrees, tph->ph0.width);
- meson_parm_write(clk->map, &tph->ph0, val);
- meson_parm_write(clk->map, &tph->ph1, val);
- meson_parm_write(clk->map, &tph->ph2, val);
-
- return 0;
-}
-
-const struct clk_ops meson_clk_triphase_ops = {
- .init = meson_clk_triphase_sync,
- .get_phase = meson_clk_triphase_get_phase,
- .set_phase = meson_clk_triphase_set_phase,
-};
-EXPORT_SYMBOL_GPL(meson_clk_triphase_ops);
diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h
deleted file mode 100644
index 6183b22c4bf2..000000000000
--- a/drivers/clk/meson/clkc.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2015 Endless Mobile, Inc.
- * Author: Carlo Caione <carlo@endlessm.com>
- */
-
-#ifndef __CLKC_H
-#define __CLKC_H
-
-#include <linux/clk-provider.h>
-#include "clk-regmap.h"
-
-#define PMASK(width) GENMASK(width - 1, 0)
-#define SETPMASK(width, shift) GENMASK(shift + width - 1, shift)
-#define CLRPMASK(width, shift) (~SETPMASK(width, shift))
-
-#define PARM_GET(width, shift, reg) \
- (((reg) & SETPMASK(width, shift)) >> (shift))
-#define PARM_SET(width, shift, reg, val) \
- (((reg) & CLRPMASK(width, shift)) | ((val) << (shift)))
-
-#define MESON_PARM_APPLICABLE(p) (!!((p)->width))
-
-struct parm {
- u16 reg_off;
- u8 shift;
- u8 width;
-};
-
-static inline unsigned int meson_parm_read(struct regmap *map, struct parm *p)
-{
- unsigned int val;
-
- regmap_read(map, p->reg_off, &val);
- return PARM_GET(p->width, p->shift, val);
-}
-
-static inline void meson_parm_write(struct regmap *map, struct parm *p,
- unsigned int val)
-{
- regmap_update_bits(map, p->reg_off, SETPMASK(p->width, p->shift),
- val << p->shift);
-}
-
-
-struct pll_params_table {
- u16 m;
- u16 n;
-};
-
-#define PLL_PARAMS(_m, _n) \
- { \
- .m = (_m), \
- .n = (_n), \
- }
-
-#define CLK_MESON_PLL_ROUND_CLOSEST BIT(0)
-
-struct meson_clk_pll_data {
- struct parm en;
- struct parm m;
- struct parm n;
- struct parm frac;
- struct parm l;
- struct parm rst;
- const struct reg_sequence *init_regs;
- unsigned int init_count;
- const struct pll_params_table *table;
- u8 flags;
-};
-
-#define to_meson_clk_pll(_hw) container_of(_hw, struct meson_clk_pll, hw)
-
-struct meson_clk_mpll_data {
- struct parm sdm;
- struct parm sdm_en;
- struct parm n2;
- struct parm ssen;
- struct parm misc;
- spinlock_t *lock;
- u8 flags;
-};
-
-#define CLK_MESON_MPLL_ROUND_CLOSEST BIT(0)
-
-struct meson_clk_phase_data {
- struct parm ph;
-};
-
-int meson_clk_degrees_from_val(unsigned int val, unsigned int width);
-unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width);
-
-struct meson_vid_pll_div_data {
- struct parm val;
- struct parm sel;
-};
-
-#define MESON_GATE(_name, _reg, _bit) \
-struct clk_regmap _name = { \
- .data = &(struct clk_regmap_gate_data){ \
- .offset = (_reg), \
- .bit_idx = (_bit), \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name, \
- .ops = &clk_regmap_gate_ops, \
- .parent_names = (const char *[]){ "clk81" }, \
- .num_parents = 1, \
- .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
- }, \
-};
-
-/* clk_ops */
-extern const struct clk_ops meson_clk_pll_ro_ops;
-extern const struct clk_ops meson_clk_pll_ops;
-extern const struct clk_ops meson_clk_cpu_ops;
-extern const struct clk_ops meson_clk_mpll_ro_ops;
-extern const struct clk_ops meson_clk_mpll_ops;
-extern const struct clk_ops meson_clk_phase_ops;
-extern const struct clk_ops meson_vid_pll_div_ro_ops;
-
-struct clk_hw *meson_clk_hw_register_input(struct device *dev,
- const char *of_name,
- const char *clk_name,
- unsigned long flags);
-
-#endif /* __CLKC_H */
diff --git a/drivers/clk/meson/g12a-aoclk.c b/drivers/clk/meson/g12a-aoclk.c
new file mode 100644
index 000000000000..1994e735396b
--- /dev/null
+++ b/drivers/clk/meson/g12a-aoclk.c
@@ -0,0 +1,454 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Amlogic Meson-G12A Always-On Clock Controller Driver
+ *
+ * Copyright (c) 2016 Baylibre SAS.
+ * Author: Michael Turquette <mturquette@baylibre.com>
+ *
+ * Copyright (c) 2019 Baylibre SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/mfd/syscon.h>
+#include "meson-aoclk.h"
+#include "g12a-aoclk.h"
+
+#include "clk-regmap.h"
+#include "clk-dualdiv.h"
+
+#define IN_PREFIX "ao-in-"
+
+/*
+ * AO Configuration Clock register offsets
+ * Register offsets from the data sheet must be multiplied by 4.
+ */
+#define AO_RTI_STATUS_REG3 0x0C
+#define AO_RTI_PWR_CNTL_REG0 0x10
+#define AO_RTI_GEN_CNTL_REG0 0x40
+#define AO_CLK_GATE0 0x4c
+#define AO_CLK_GATE0_SP 0x50
+#define AO_OSCIN_CNTL 0x58
+#define AO_CEC_CLK_CNTL_REG0 0x74
+#define AO_CEC_CLK_CNTL_REG1 0x78
+#define AO_SAR_CLK 0x90
+#define AO_RTC_ALT_CLK_CNTL0 0x94
+#define AO_RTC_ALT_CLK_CNTL1 0x98
+
+/*
+ * Like every other peripheral clock gate in the Amlogic clock drivers,
+ * we use CLK_IGNORE_UNUSED here so the state left by the bootloader is
+ * preserved. The goal is to remove this flag at some point, but doing
+ * so safely will first require some extensive testing.
+ */
+#define AXG_AO_GATE(_name, _reg, _bit) \
+static struct clk_regmap g12a_aoclk_##_name = { \
+ .data = &(struct clk_regmap_gate_data) { \
+ .offset = (_reg), \
+ .bit_idx = (_bit), \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = "g12a_ao_" #_name, \
+ .ops = &clk_regmap_gate_ops, \
+ .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk" }, \
+ .num_parents = 1, \
+ .flags = CLK_IGNORE_UNUSED, \
+ }, \
+}
+
+AXG_AO_GATE(ahb, AO_CLK_GATE0, 0);
+AXG_AO_GATE(ir_in, AO_CLK_GATE0, 1);
+AXG_AO_GATE(i2c_m0, AO_CLK_GATE0, 2);
+AXG_AO_GATE(i2c_s0, AO_CLK_GATE0, 3);
+AXG_AO_GATE(uart, AO_CLK_GATE0, 4);
+AXG_AO_GATE(prod_i2c, AO_CLK_GATE0, 5);
+AXG_AO_GATE(uart2, AO_CLK_GATE0, 6);
+AXG_AO_GATE(ir_out, AO_CLK_GATE0, 7);
+AXG_AO_GATE(saradc, AO_CLK_GATE0, 8);
+AXG_AO_GATE(mailbox, AO_CLK_GATE0_SP, 0);
+AXG_AO_GATE(m3, AO_CLK_GATE0_SP, 1);
+AXG_AO_GATE(ahb_sram, AO_CLK_GATE0_SP, 2);
+AXG_AO_GATE(rti, AO_CLK_GATE0_SP, 3);
+AXG_AO_GATE(m4_fclk, AO_CLK_GATE0_SP, 4);
+AXG_AO_GATE(m4_hclk, AO_CLK_GATE0_SP, 5);
+
+static struct clk_regmap g12a_aoclk_cts_oscin = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AO_RTI_PWR_CNTL_REG0,
+ .bit_idx = 14,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_oscin",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .num_parents = 1,
+ },
+};
+
+static const struct meson_clk_dualdiv_param g12a_32k_div_table[] = {
+ {
+ .dual = 1,
+ .n1 = 733,
+ .m1 = 8,
+ .n2 = 732,
+ .m2 = 11,
+ }, {}
+};
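A quick sanity check of these parameters, assuming the dual divider averages its two stages as rate = parent * (m1 + m2) / (n1 * m1 + n2 * m2) (my reading of clk-dualdiv.c, introduced earlier in this series): with a 24MHz xtal,

	24000000 * (8 + 11) / (733 * 8 + 732 * 11)
		= 456000000 / 13916
		~ 32768 Hz

which is the 32.768kHz reference the RTC and CEC paths below expect.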
+
+/* 32k_by_oscin clock */
+
+static struct clk_regmap g12a_aoclk_32k_by_oscin_pre = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AO_RTC_ALT_CLK_CNTL0,
+ .bit_idx = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_32k_by_oscin_pre",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "cts_oscin" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_aoclk_32k_by_oscin_div = {
+ .data = &(struct meson_clk_dualdiv_data){
+ .n1 = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL0,
+ .shift = 0,
+ .width = 12,
+ },
+ .n2 = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL0,
+ .shift = 12,
+ .width = 12,
+ },
+ .m1 = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL1,
+ .shift = 0,
+ .width = 12,
+ },
+ .m2 = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL1,
+ .shift = 12,
+ .width = 12,
+ },
+ .dual = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .table = g12a_32k_div_table,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_32k_by_oscin_div",
+ .ops = &meson_clk_dualdiv_ops,
+ .parent_names = (const char *[]){ "g12a_ao_32k_by_oscin_pre" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_aoclk_32k_by_oscin_sel = {
+ .data = &(struct clk_regmap_mux_data) {
+ .offset = AO_RTC_ALT_CLK_CNTL1,
+ .mask = 0x1,
+ .shift = 24,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_32k_by_oscin_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "g12a_ao_32k_by_oscin_div",
+ "g12a_ao_32k_by_oscin_pre" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_aoclk_32k_by_oscin = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AO_RTC_ALT_CLK_CNTL0,
+ .bit_idx = 30,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_32k_by_oscin",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "g12a_ao_32k_by_oscin_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+/* cec clock */
+
+static struct clk_regmap g12a_aoclk_cec_pre = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AO_CEC_CLK_CNTL_REG0,
+ .bit_idx = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_cec_pre",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "cts_oscin" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_aoclk_cec_div = {
+ .data = &(struct meson_clk_dualdiv_data){
+ .n1 = {
+ .reg_off = AO_CEC_CLK_CNTL_REG0,
+ .shift = 0,
+ .width = 12,
+ },
+ .n2 = {
+ .reg_off = AO_CEC_CLK_CNTL_REG0,
+ .shift = 12,
+ .width = 12,
+ },
+ .m1 = {
+ .reg_off = AO_CEC_CLK_CNTL_REG1,
+ .shift = 0,
+ .width = 12,
+ },
+ .m2 = {
+ .reg_off = AO_CEC_CLK_CNTL_REG1,
+ .shift = 12,
+ .width = 12,
+ },
+ .dual = {
+ .reg_off = AO_CEC_CLK_CNTL_REG0,
+ .shift = 28,
+ .width = 1,
+ },
+ .table = g12a_32k_div_table,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_cec_div",
+ .ops = &meson_clk_dualdiv_ops,
+ .parent_names = (const char *[]){ "g12a_ao_cec_pre" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_aoclk_cec_sel = {
+ .data = &(struct clk_regmap_mux_data) {
+ .offset = AO_CEC_CLK_CNTL_REG1,
+ .mask = 0x1,
+ .shift = 24,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_cec_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "g12a_ao_cec_div",
+ "g12a_ao_cec_pre" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_aoclk_cec = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AO_CEC_CLK_CNTL_REG0,
+ .bit_idx = 30,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_cec",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "g12a_ao_cec_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_aoclk_cts_rtc_oscin = {
+ .data = &(struct clk_regmap_mux_data) {
+ .offset = AO_RTI_PWR_CNTL_REG0,
+ .mask = 0x1,
+ .shift = 10,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_cts_rtc_oscin",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "g12a_ao_32k_by_oscin",
+ IN_PREFIX "ext_32k-0" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_aoclk_clk81 = {
+ .data = &(struct clk_regmap_mux_data) {
+ .offset = AO_RTI_PWR_CNTL_REG0,
+ .mask = 0x1,
+ .shift = 8,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_clk81",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk",
+ "g12a_ao_cts_rtc_oscin"},
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_aoclk_saradc_mux = {
+ .data = &(struct clk_regmap_mux_data) {
+ .offset = AO_SAR_CLK,
+ .mask = 0x3,
+ .shift = 9,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_saradc_mux",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal",
+ "g12a_ao_clk81" },
+ .num_parents = 2,
+ },
+};
+
+static struct clk_regmap g12a_aoclk_saradc_div = {
+ .data = &(struct clk_regmap_div_data) {
+ .offset = AO_SAR_CLK,
+ .shift = 0,
+ .width = 8,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_saradc_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "g12a_ao_saradc_mux" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_aoclk_saradc_gate = {
+ .data = &(struct clk_regmap_gate_data) {
+ .offset = AO_SAR_CLK,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_saradc_gate",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "g12a_ao_saradc_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const unsigned int g12a_aoclk_reset[] = {
+ [RESET_AO_IR_IN] = 16,
+ [RESET_AO_UART] = 17,
+ [RESET_AO_I2C_M] = 18,
+ [RESET_AO_I2C_S] = 19,
+ [RESET_AO_SAR_ADC] = 20,
+ [RESET_AO_UART2] = 22,
+ [RESET_AO_IR_OUT] = 23,
+};
+
+static struct clk_regmap *g12a_aoclk_regmap[] = {
+ &g12a_aoclk_ahb,
+ &g12a_aoclk_ir_in,
+ &g12a_aoclk_i2c_m0,
+ &g12a_aoclk_i2c_s0,
+ &g12a_aoclk_uart,
+ &g12a_aoclk_prod_i2c,
+ &g12a_aoclk_uart2,
+ &g12a_aoclk_ir_out,
+ &g12a_aoclk_saradc,
+ &g12a_aoclk_mailbox,
+ &g12a_aoclk_m3,
+ &g12a_aoclk_ahb_sram,
+ &g12a_aoclk_rti,
+ &g12a_aoclk_m4_fclk,
+ &g12a_aoclk_m4_hclk,
+ &g12a_aoclk_cts_oscin,
+ &g12a_aoclk_32k_by_oscin_pre,
+ &g12a_aoclk_32k_by_oscin_div,
+ &g12a_aoclk_32k_by_oscin_sel,
+ &g12a_aoclk_32k_by_oscin,
+ &g12a_aoclk_cec_pre,
+ &g12a_aoclk_cec_div,
+ &g12a_aoclk_cec_sel,
+ &g12a_aoclk_cec,
+ &g12a_aoclk_cts_rtc_oscin,
+ &g12a_aoclk_clk81,
+ &g12a_aoclk_saradc_mux,
+ &g12a_aoclk_saradc_div,
+ &g12a_aoclk_saradc_gate,
+};
+
+static const struct clk_hw_onecell_data g12a_aoclk_onecell_data = {
+ .hws = {
+ [CLKID_AO_AHB] = &g12a_aoclk_ahb.hw,
+ [CLKID_AO_IR_IN] = &g12a_aoclk_ir_in.hw,
+ [CLKID_AO_I2C_M0] = &g12a_aoclk_i2c_m0.hw,
+ [CLKID_AO_I2C_S0] = &g12a_aoclk_i2c_s0.hw,
+ [CLKID_AO_UART] = &g12a_aoclk_uart.hw,
+ [CLKID_AO_PROD_I2C] = &g12a_aoclk_prod_i2c.hw,
+ [CLKID_AO_UART2] = &g12a_aoclk_uart2.hw,
+ [CLKID_AO_IR_OUT] = &g12a_aoclk_ir_out.hw,
+ [CLKID_AO_SAR_ADC] = &g12a_aoclk_saradc.hw,
+ [CLKID_AO_MAILBOX] = &g12a_aoclk_mailbox.hw,
+ [CLKID_AO_M3] = &g12a_aoclk_m3.hw,
+ [CLKID_AO_AHB_SRAM] = &g12a_aoclk_ahb_sram.hw,
+ [CLKID_AO_RTI] = &g12a_aoclk_rti.hw,
+ [CLKID_AO_M4_FCLK] = &g12a_aoclk_m4_fclk.hw,
+ [CLKID_AO_M4_HCLK] = &g12a_aoclk_m4_hclk.hw,
+ [CLKID_AO_CLK81] = &g12a_aoclk_clk81.hw,
+ [CLKID_AO_SAR_ADC_SEL] = &g12a_aoclk_saradc_mux.hw,
+ [CLKID_AO_SAR_ADC_DIV] = &g12a_aoclk_saradc_div.hw,
+ [CLKID_AO_SAR_ADC_CLK] = &g12a_aoclk_saradc_gate.hw,
+ [CLKID_AO_CTS_OSCIN] = &g12a_aoclk_cts_oscin.hw,
+ [CLKID_AO_32K_PRE] = &g12a_aoclk_32k_by_oscin_pre.hw,
+ [CLKID_AO_32K_DIV] = &g12a_aoclk_32k_by_oscin_div.hw,
+ [CLKID_AO_32K_SEL] = &g12a_aoclk_32k_by_oscin_sel.hw,
+ [CLKID_AO_32K] = &g12a_aoclk_32k_by_oscin.hw,
+ [CLKID_AO_CEC_PRE] = &g12a_aoclk_cec_pre.hw,
+ [CLKID_AO_CEC_DIV] = &g12a_aoclk_cec_div.hw,
+ [CLKID_AO_CEC_SEL] = &g12a_aoclk_cec_sel.hw,
+ [CLKID_AO_CEC] = &g12a_aoclk_cec.hw,
+ [CLKID_AO_CTS_RTC_OSCIN] = &g12a_aoclk_cts_rtc_oscin.hw,
+ },
+ .num = NR_CLKS,
+};
+
+static const struct meson_aoclk_input g12a_aoclk_inputs[] = {
+ { .name = "xtal", .required = true },
+ { .name = "mpeg-clk", .required = true },
+ { .name = "ext-32k-0", .required = false },
+};
+
+static const struct meson_aoclk_data g12a_aoclkc_data = {
+ .reset_reg = AO_RTI_GEN_CNTL_REG0,
+ .num_reset = ARRAY_SIZE(g12a_aoclk_reset),
+ .reset = g12a_aoclk_reset,
+ .num_clks = ARRAY_SIZE(g12a_aoclk_regmap),
+ .clks = g12a_aoclk_regmap,
+ .hw_data = &g12a_aoclk_onecell_data,
+ .inputs = g12a_aoclk_inputs,
+ .num_inputs = ARRAY_SIZE(g12a_aoclk_inputs),
+ .input_prefix = IN_PREFIX,
+};
+
+static const struct of_device_id g12a_aoclkc_match_table[] = {
+ {
+ .compatible = "amlogic,meson-g12a-aoclkc",
+ .data = &g12a_aoclkc_data,
+ },
+ { }
+};
+
+static struct platform_driver g12a_aoclkc_driver = {
+ .probe = meson_aoclkc_probe,
+ .driver = {
+ .name = "g12a-aoclkc",
+ .of_match_table = g12a_aoclkc_match_table,
+ },
+};
+
+builtin_platform_driver(g12a_aoclkc_driver);
diff --git a/drivers/clk/meson/g12a-aoclk.h b/drivers/clk/meson/g12a-aoclk.h
new file mode 100644
index 000000000000..04b0d5506641
--- /dev/null
+++ b/drivers/clk/meson/g12a-aoclk.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef __G12A_AOCLKC_H
+#define __G12A_AOCLKC_H
+
+/*
+ * CLKID index values
+ *
+ * These indices are entirely contrived and do not map onto the hardware.
+ * It has now been decided to expose everything by default in the DT header:
+ * include/dt-bindings/clock/g12a-aoclkc.h. Only the clock ids we don't want
+ * to expose, such as the internal muxes and dividers of composite clocks,
+ * will remain defined here.
+ */
+#define CLKID_AO_SAR_ADC_SEL 16
+#define CLKID_AO_SAR_ADC_DIV 17
+#define CLKID_AO_CTS_OSCIN 19
+#define CLKID_AO_32K_PRE 20
+#define CLKID_AO_32K_DIV 21
+#define CLKID_AO_32K_SEL 22
+#define CLKID_AO_CEC_PRE 24
+#define CLKID_AO_CEC_DIV 25
+#define CLKID_AO_CEC_SEL 26
+
+#define NR_CLKS 29
+
+#include <dt-bindings/clock/g12a-aoclkc.h>
+#include <dt-bindings/reset/g12a-aoclkc.h>
+
+#endif /* __G12A_AOCLKC_H */
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
new file mode 100644
index 000000000000..0e1ce8c03259
--- /dev/null
+++ b/drivers/clk/meson/g12a.c
@@ -0,0 +1,2359 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Amlogic Meson-G12A Clock Controller Driver
+ *
+ * Copyright (c) 2016 Baylibre SAS.
+ * Author: Michael Turquette <mturquette@baylibre.com>
+ *
+ * Copyright (c) 2018 Amlogic, inc.
+ * Author: Qiufang Dai <qiufang.dai@amlogic.com>
+ * Author: Jian Hu <jian.hu@amlogic.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-input.h"
+#include "clk-mpll.h"
+#include "clk-pll.h"
+#include "clk-regmap.h"
+#include "vid-pll-div.h"
+#include "meson-eeclk.h"
+#include "g12a.h"
+
+static DEFINE_SPINLOCK(meson_clk_lock);
+
+static struct clk_regmap g12a_fixed_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_FIX_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_FIX_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_FIX_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_FIX_PLL_CNTL1,
+ .shift = 0,
+ .width = 17,
+ },
+ .l = {
+ .reg_off = HHI_FIX_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_FIX_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fixed_pll_dco",
+ .ops = &meson_clk_pll_ro_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fixed_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_FIX_PLL_CNTL0,
+ .shift = 16,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fixed_pll",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "fixed_pll_dco" },
+ .num_parents = 1,
+ /*
+ * This clock won't ever change at runtime so
+ * CLK_SET_RATE_PARENT is not required
+ */
+ },
+};
+
+/*
+ * Internal sys pll emulation configuration parameters
+ */
+static const struct reg_sequence g12a_sys_init_regs[] = {
+ { .reg = HHI_SYS_PLL_CNTL1, .def = 0x00000000 },
+ { .reg = HHI_SYS_PLL_CNTL2, .def = 0x00000000 },
+ { .reg = HHI_SYS_PLL_CNTL3, .def = 0x48681c00 },
+ { .reg = HHI_SYS_PLL_CNTL4, .def = 0x88770290 },
+ { .reg = HHI_SYS_PLL_CNTL5, .def = 0x39272000 },
+ { .reg = HHI_SYS_PLL_CNTL6, .def = 0x56540000 },
+};
+
+static struct clk_regmap g12a_sys_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_SYS_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_SYS_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_SYS_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .l = {
+ .reg_off = HHI_SYS_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_SYS_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ .init_regs = g12a_sys_init_regs,
+ .init_count = ARRAY_SIZE(g12a_sys_init_regs),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "sys_pll_dco",
+ .ops = &meson_clk_pll_ro_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_sys_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SYS_PLL_CNTL0,
+ .shift = 16,
+ .width = 3,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "sys_pll",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "sys_pll_dco" },
+ .num_parents = 1,
+ },
+};
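To make the dco/od split concrete (the numbers are illustrative, not actual firmware settings): each PLL is modelled as a DCO running at parent * m / n, followed by a power-of-two post divider held in the same control register.

	/* e.g. 24MHz xtal, m = 84, n = 1  ->  2016MHz DCO     */
	/* od field = 1 (divide by 2^1)    ->  1008MHz sys_pll */

Since sys_pll_dco and sys_pll both use read-only ops here, Linux only reports whatever the bootloader programmed and never rewrites these fields.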
+
+static const struct pll_mult_range g12a_gp0_pll_mult_range = {
+ .min = 55,
+ .max = 255,
+};
+
+/*
+ * Internal gp0 pll emulation configuration parameters
+ */
+static const struct reg_sequence g12a_gp0_init_regs[] = {
+ { .reg = HHI_GP0_PLL_CNTL1, .def = 0x00000000 },
+ { .reg = HHI_GP0_PLL_CNTL2, .def = 0x00000000 },
+ { .reg = HHI_GP0_PLL_CNTL3, .def = 0x48681c00 },
+ { .reg = HHI_GP0_PLL_CNTL4, .def = 0x33771290 },
+ { .reg = HHI_GP0_PLL_CNTL5, .def = 0x39272000 },
+ { .reg = HHI_GP0_PLL_CNTL6, .def = 0x56540000 },
+};
+
+static struct clk_regmap g12a_gp0_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_GP0_PLL_CNTL1,
+ .shift = 0,
+ .width = 17,
+ },
+ .l = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ .range = &g12a_gp0_pll_mult_range,
+ .init_regs = g12a_gp0_init_regs,
+ .init_count = ARRAY_SIZE(g12a_gp0_init_regs),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_pll_dco",
+ .ops = &meson_clk_pll_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_gp0_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_GP0_PLL_CNTL0,
+ .shift = 16,
+ .width = 3,
+ .flags = (CLK_DIVIDER_POWER_OF_TWO |
+ CLK_DIVIDER_ROUND_CLOSEST),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_pll",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "gp0_pll_dco" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+/*
+ * Internal hifi pll emulation configuration parameters
+ */
+static const struct reg_sequence g12a_hifi_init_regs[] = {
+ { .reg = HHI_HIFI_PLL_CNTL1, .def = 0x00000000 },
+ { .reg = HHI_HIFI_PLL_CNTL2, .def = 0x00000000 },
+ { .reg = HHI_HIFI_PLL_CNTL3, .def = 0x6a285c00 },
+ { .reg = HHI_HIFI_PLL_CNTL4, .def = 0x65771290 },
+ { .reg = HHI_HIFI_PLL_CNTL5, .def = 0x39272000 },
+ { .reg = HHI_HIFI_PLL_CNTL6, .def = 0x56540000 },
+};
+
+static struct clk_regmap g12a_hifi_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_HIFI_PLL_CNTL1,
+ .shift = 0,
+ .width = 17,
+ },
+ .l = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ .range = &g12a_gp0_pll_mult_range,
+ .init_regs = g12a_hifi_init_regs,
+ .init_count = ARRAY_SIZE(g12a_hifi_init_regs),
+ .flags = CLK_MESON_PLL_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hifi_pll_dco",
+ .ops = &meson_clk_pll_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_hifi_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HIFI_PLL_CNTL0,
+ .shift = 16,
+ .width = 2,
+ .flags = (CLK_DIVIDER_POWER_OF_TWO |
+ CLK_DIVIDER_ROUND_CLOSEST),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hifi_pll",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "hifi_pll_dco" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_HDMI_PLL_CNTL1,
+ .shift = 0,
+ .width = 16,
+ },
+ .l = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 30,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_dco",
+ .ops = &meson_clk_pll_ro_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .num_parents = 1,
+ /*
+ * The display driver currently handles the hdmi pll registers directly,
+ * so we need NOCACHE to keep our view of the clock as accurate as possible
+ */
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_pll_od = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_PLL_CNTL0,
+ .shift = 16,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_od",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "hdmi_pll_dco" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_pll_od2 = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_PLL_CNTL0,
+ .shift = 18,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_od2",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "hdmi_pll_od" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_PLL_CNTL0,
+ .shift = 20,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "hdmi_pll_od2" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_fixed_factor g12a_fclk_div2_div = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div2_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_fclk_div3_div = {
+ .mult = 1,
+ .div = 3,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div3_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div3 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 20,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div3",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div3_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_fclk_div4_div = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div4_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div4 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 21,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div4",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div4_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_fclk_div5_div = {
+ .mult = 1,
+ .div = 5,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div5_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div5 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 22,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div5",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div5_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_fclk_div7_div = {
+ .mult = 1,
+ .div = 7,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div7_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div7 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 23,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div7",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div7_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_fclk_div2p5_div = {
+ .mult = 1,
+ .div = 5,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2p5_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll_dco" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div2p5 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 25,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2p5",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div2p5_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_mpll_50m_div = {
+ .mult = 1,
+ .div = 80,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll_50m_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll_dco" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_mpll_50m = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_FIX_PLL_CNTL3,
+ .mask = 0x1,
+ .shift = 5,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll_50m",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal",
+ "mpll_50m_div" },
+ .num_parents = 2,
+ },
+};
+
+static struct clk_fixed_factor g12a_mpll_prediv = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll_prediv",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll_dco" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_mpll0_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL1,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL1,
+ .shift = 30,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL1,
+ .shift = 20,
+ .width = 9,
+ },
+ .ssen = {
+ .reg_off = HHI_MPLL_CNTL1,
+ .shift = 29,
+ .width = 1,
+ },
+ .lock = &meson_clk_lock,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll0_div",
+ .ops = &meson_clk_mpll_ops,
+ .parent_names = (const char *[]){ "mpll_prediv" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_mpll0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL1,
+ .bit_idx = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_mpll1_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL3,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL3,
+ .shift = 30,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL3,
+ .shift = 20,
+ .width = 9,
+ },
+ .ssen = {
+ .reg_off = HHI_MPLL_CNTL3,
+ .shift = 29,
+ .width = 1,
+ },
+ .lock = &meson_clk_lock,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll1_div",
+ .ops = &meson_clk_mpll_ops,
+ .parent_names = (const char *[]){ "mpll_prediv" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_mpll1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL3,
+ .bit_idx = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_mpll2_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL5,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL5,
+ .shift = 30,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL5,
+ .shift = 20,
+ .width = 9,
+ },
+ .ssen = {
+ .reg_off = HHI_MPLL_CNTL5,
+ .shift = 29,
+ .width = 1,
+ },
+ .lock = &meson_clk_lock,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll2_div",
+ .ops = &meson_clk_mpll_ops,
+ .parent_names = (const char *[]){ "mpll_prediv" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_mpll2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL5,
+ .bit_idx = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll2_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_mpll3_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 30,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 20,
+ .width = 9,
+ },
+ .ssen = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 29,
+ .width = 1,
+ },
+ .lock = &meson_clk_lock,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll3_div",
+ .ops = &meson_clk_mpll_ops,
+ .parent_names = (const char *[]){ "mpll_prediv" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_mpll3 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL7,
+ .bit_idx = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll3",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll3_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
+static const char * const clk81_parent_names[] = {
+ IN_PREFIX "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
+ "fclk_div3", "fclk_div5"
+};
+
+static struct clk_regmap g12a_mpeg_clk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MPEG_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 12,
+ .table = mux_table_clk81,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpeg_clk_sel",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = clk81_parent_names,
+ .num_parents = ARRAY_SIZE(clk81_parent_names),
+ },
+};
+
+static struct clk_regmap g12a_mpeg_clk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MPEG_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpeg_clk_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "mpeg_clk_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_clk81 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPEG_CLK_CNTL,
+ .bit_idx = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "clk81",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpeg_clk_div" },
+ .num_parents = 1,
+ .flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
+ },
+};
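An illustrative walk through the clk81 path (typical rates, not values guaranteed by this driver): mpeg_clk_sel picks one of the fixed_pll-derived dividers, mpeg_clk_div scales it, and clk81 gates the result and is CLK_IS_CRITICAL because most peripherals are clocked from it.

	/* e.g. fixed_pll at 2GHz  ->  fclk_div3 ~ 666.7MHz */
	/* mpeg_clk_div set to 4   ->  clk81 ~ 166.7MHz     */

The select uses the read-only mux ops, so the parent chosen by the bootloader is never changed at runtime.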
+
+static const char * const g12a_sd_emmc_clk0_parent_names[] = {
+ IN_PREFIX "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7",
+
+ /*
+ * Following these parent clocks, we should also have had mpll2, mpll3
+ * and gp0_pll but these clocks are too precious to be used here. All
+ * the necessary rates for MMC and NAND operation can be achieved using
+ * g12a_ee_core or fclk_div clocks
+ */
+};
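For illustration (assuming a 2GHz fixed_pll, so fclk_div2 runs around 1GHz), the mux/div/gate triplets below already cover the usual MMC operating points without touching the PLLs:

	/* ~400kHz card init:   xtal (24MHz)     / 60 = 400kHz */
	/* ~200MHz eMMC HS200:  fclk_div2 (1GHz) / 5  = 200MHz */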
+
+/* SDIO clock */
+static struct clk_regmap g12a_sd_emmc_a_clk0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 9,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "sd_emmc_a_clk0_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_sd_emmc_clk0_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_sd_emmc_a_clk0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "sd_emmc_a_clk0_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "sd_emmc_a_clk0_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_sd_emmc_a_clk0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .bit_idx = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "sd_emmc_a_clk0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "sd_emmc_a_clk0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+/* SDcard clock */
+static struct clk_regmap g12a_sd_emmc_b_clk0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 25,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "sd_emmc_b_clk0_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_sd_emmc_clk0_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_sd_emmc_b_clk0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "sd_emmc_b_clk0_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "sd_emmc_b_clk0_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_sd_emmc_b_clk0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .bit_idx = 23,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "sd_emmc_b_clk0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "sd_emmc_b_clk0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+/* EMMC/NAND clock */
+static struct clk_regmap g12a_sd_emmc_c_clk0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_NAND_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 9,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "sd_emmc_c_clk0_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_sd_emmc_clk0_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_sd_emmc_c_clk0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_NAND_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "sd_emmc_c_clk0_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "sd_emmc_c_clk0_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_sd_emmc_c_clk0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_NAND_CLK_CNTL,
+ .bit_idx = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "sd_emmc_c_clk0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "sd_emmc_c_clk0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+/* VPU Clock */
+
+static const char * const g12a_vpu_parent_names[] = {
+ "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7",
+ "mpll1", "vid_pll", "hifi_pll", "gp0_pll",
+};
+
+static struct clk_regmap g12a_vpu_0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_0_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_vpu_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_vpu_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap g12a_vpu_0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_0_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vpu_0_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_vpu_0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vpu_0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vpu_0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vpu_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 25,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_vpu_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_vpu_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap g12a_vpu_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_1_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vpu_1_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_vpu_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vpu_1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vpu_1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vpu = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 1,
+ .shift = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu",
+ .ops = &clk_regmap_mux_ops,
+ /*
+ * bit 31 selects from 2 possible parents:
+ * vpu_0 or vpu_1
+ */
+ .parent_names = (const char *[]){ "vpu_0", "vpu_1" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+/* VAPB Clock */
+
+static const char * const g12a_vapb_parent_names[] = {
+ "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7",
+ "mpll1", "vid_pll", "mpll2", "fclk_div2p5",
+};
+
+static struct clk_regmap g12a_vapb_0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_0_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_vapb_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_vapb_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap g12a_vapb_0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_0_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vapb_0_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_vapb_0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vapb_0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vapb_0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vapb_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .mask = 0x3,
+ .shift = 25,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_vapb_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_vapb_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap g12a_vapb_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_1_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vapb_1_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_vapb_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vapb_1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vapb_1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vapb_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .mask = 1,
+ .shift = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_sel",
+ .ops = &clk_regmap_mux_ops,
+ /*
+ * bit 31 selects from 2 possible parents:
+ * vapb_0 or vapb_1
+ */
+ .parent_names = (const char *[]){ "vapb_0", "vapb_1" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap g12a_vapb = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .bit_idx = 30,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vapb",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vapb_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+/* Video Clocks */
+
+static struct clk_regmap g12a_vid_pll_div = {
+ .data = &(struct meson_vid_pll_div_data){
+ .val = {
+ .reg_off = HHI_VID_PLL_CLK_DIV,
+ .shift = 0,
+ .width = 15,
+ },
+ .sel = {
+ .reg_off = HHI_VID_PLL_CLK_DIV,
+ .shift = 16,
+ .width = 2,
+ },
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vid_pll_div",
+ .ops = &meson_vid_pll_div_ro_ops,
+ .parent_names = (const char *[]){ "hdmi_pll" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static const char * const g12a_vid_pll_parent_names[] = { "vid_pll_div",
+ "hdmi_pll" };
+
+static struct clk_regmap g12a_vid_pll_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VID_PLL_CLK_DIV,
+ .mask = 0x1,
+ .shift = 18,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vid_pll_sel",
+ .ops = &clk_regmap_mux_ops,
+ /*
+ * bit 18 selects from 2 possible parents:
+ * vid_pll_div or hdmi_pll
+ */
+ .parent_names = g12a_vid_pll_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_vid_pll_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_vid_pll = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_PLL_CLK_DIV,
+ .bit_idx = 19,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vid_pll",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vid_pll_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static const char * const g12a_vclk_parent_names[] = {
+ "vid_pll", "gp0_pll", "hifi_pll", "mpll1", "fclk_div3", "fclk_div4",
+ "fclk_div5", "fclk_div7"
+};
+
+static struct clk_regmap g12a_vclk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_vclk_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_vclk_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_vclk2_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_vclk_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_vclk_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_vclk_input = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_DIV,
+ .bit_idx = 16,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_input",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk2_input = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_DIV,
+ .bit_idx = 16,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_input",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk2_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VID_CLK_DIV,
+ .shift = 0,
+ .width = 8,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vclk_input" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_vclk2_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VIID_CLK_DIV,
+ .shift = 0,
+ .width = 8,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vclk2_input" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_vclk = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 19,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 19,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk2_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk_div1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 0,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk_div2_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 1,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div2_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk_div4_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 2,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div4_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk_div6_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 3,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div6_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk_div12_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 4,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div12_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk2_div1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 0,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk2" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk2_div2_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 1,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div2_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk2" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk2_div4_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 2,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div4_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk2" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk2_div6_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 3,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div6_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk2" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk2_div12_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 4,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div12_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk2" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_fixed_factor g12a_vclk_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div2",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk_div2_en" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_vclk_div4 = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div4",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk_div4_en" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_vclk_div6 = {
+ .mult = 1,
+ .div = 6,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div6",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk_div6_en" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_vclk_div12 = {
+ .mult = 1,
+ .div = 12,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div12",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk_div12_en" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_vclk2_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div2",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk2_div2_en" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_vclk2_div4 = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div4",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk2_div4_en" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_vclk2_div6 = {
+ .mult = 1,
+ .div = 6,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div6",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk2_div6_en" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_vclk2_div12 = {
+ .mult = 1,
+ .div = 12,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div12",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk2_div12_en" },
+ .num_parents = 1,
+ },
+};
+
+static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const char * const g12a_cts_parent_names[] = {
+ "vclk_div1", "vclk_div2", "vclk_div4", "vclk_div6",
+ "vclk_div12", "vclk2_div1", "vclk2_div2", "vclk2_div4",
+ "vclk2_div6", "vclk2_div12"
+};
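
For reference: with the generic clk-regmap mux (not shown in this hunk), .table maps a parent index to the value written into the register field, so the five vclk divider outputs above select encodings 0-4 and the five vclk2 divider outputs select encodings 8-12; encodings 5-7 simply do not appear in the table.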
+
+static struct clk_regmap g12a_cts_enci_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VID_CLK_DIV,
+ .mask = 0xf,
+ .shift = 28,
+ .table = mux_table_cts_sel,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_enci_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_cts_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_cts_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_cts_encp_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VID_CLK_DIV,
+ .mask = 0xf,
+ .shift = 20,
+ .table = mux_table_cts_sel,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_encp_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_cts_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_cts_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_cts_vdac_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VIID_CLK_DIV,
+ .mask = 0xf,
+ .shift = 28,
+ .table = mux_table_cts_sel,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_vdac_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_cts_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_cts_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+/* TOFIX: add support for cts_tcon */
+static u32 mux_table_hdmi_tx_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const char * const g12a_cts_hdmi_tx_parent_names[] = {
+ "vclk_div1", "vclk_div2", "vclk_div4", "vclk_div6",
+ "vclk_div12", "vclk2_div1", "vclk2_div2", "vclk2_div4",
+ "vclk2_div6", "vclk2_div12"
+};
+
+static struct clk_regmap g12a_hdmi_tx_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_HDMI_CLK_CNTL,
+ .mask = 0xf,
+ .shift = 16,
+ .table = mux_table_hdmi_tx_sel,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_tx_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_cts_hdmi_tx_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_cts_hdmi_tx_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_cts_enci = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL2,
+ .bit_idx = 0,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "cts_enci",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "cts_enci_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_cts_encp = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL2,
+ .bit_idx = 2,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "cts_encp",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "cts_encp_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_cts_vdac = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL2,
+ .bit_idx = 4,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "cts_vdac",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "cts_vdac_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_tx = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL2,
+ .bit_idx = 5,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "hdmi_tx",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "hdmi_tx_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+/* HDMI Clocks */
+
+static const char * const g12a_hdmi_parent_names[] = {
+ IN_PREFIX "xtal", "fclk_div4", "fclk_div3", "fclk_div5"
+};
+
+static struct clk_regmap g12a_hdmi_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_HDMI_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_hdmi_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_hdmi_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "hdmi_sel" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_hdmi = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_HDMI_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "hdmi",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "hdmi_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+/*
+ * The MALI IP is clocked by two identical clocks (mali_0 and mali_1)
+ * muxed by a glitch-free switch.
+ */
+
+static const char * const g12a_mali_0_1_parent_names[] = {
+ IN_PREFIX "xtal", "gp0_pll", "hifi_pll", "fclk_div2p5",
+ "fclk_div3", "fclk_div4", "fclk_div5", "fclk_div7"
+};
+
+static struct clk_regmap g12a_mali_0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 9,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali_0_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_mali_0_1_parent_names,
+ .num_parents = 8,
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap g12a_mali_0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali_0_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "mali_0_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap g12a_mali_0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali_0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mali_0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_mali_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 25,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali_1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_mali_0_1_parent_names,
+ .num_parents = 8,
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap g12a_mali_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali_1_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "mali_1_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap g12a_mali_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali_1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mali_1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const char * const g12a_mali_parent_names[] = {
+ "mali_0", "mali_1"
+};
+
+static struct clk_regmap g12a_mali = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .mask = 1,
+ .shift = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_mali_parent_names,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
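
The comment above describes mali_0 and mali_1 as two identical branches behind a glitch-free mux. A minimal sketch of how a consumer could exploit that layout, assuming only the standard CCF API from <linux/clk.h>; the helper name is made up, and a real driver would first use clk_get_parent() on the top-level mux to find out which branch is idle:

#include <linux/clk.h>

/* Sketch: reprogram the idle branch, then flip the glitch-free mux. */
static int mali_switch_rate(struct clk *mali_mux, struct clk *idle_branch,
                            unsigned long rate)
{
        int ret;

        /* The active branch keeps feeding the GPU while we reprogram. */
        ret = clk_set_rate(idle_branch, rate);
        if (ret)
                return ret;

        /* Switch over to the freshly programmed branch. */
        return clk_set_parent(mali_mux, idle_branch);
}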
+
+/* Everything Else (EE) domain gates */
+static MESON_GATE(g12a_ddr, HHI_GCLK_MPEG0, 0);
+static MESON_GATE(g12a_dos, HHI_GCLK_MPEG0, 1);
+static MESON_GATE(g12a_audio_locker, HHI_GCLK_MPEG0, 2);
+static MESON_GATE(g12a_mipi_dsi_host, HHI_GCLK_MPEG0, 3);
+static MESON_GATE(g12a_eth_phy, HHI_GCLK_MPEG0, 4);
+static MESON_GATE(g12a_isa, HHI_GCLK_MPEG0, 5);
+static MESON_GATE(g12a_pl301, HHI_GCLK_MPEG0, 6);
+static MESON_GATE(g12a_periphs, HHI_GCLK_MPEG0, 7);
+static MESON_GATE(g12a_spicc_0, HHI_GCLK_MPEG0, 8);
+static MESON_GATE(g12a_i2c, HHI_GCLK_MPEG0, 9);
+static MESON_GATE(g12a_sana, HHI_GCLK_MPEG0, 10);
+static MESON_GATE(g12a_sd, HHI_GCLK_MPEG0, 11);
+static MESON_GATE(g12a_rng0, HHI_GCLK_MPEG0, 12);
+static MESON_GATE(g12a_uart0, HHI_GCLK_MPEG0, 13);
+static MESON_GATE(g12a_spicc_1, HHI_GCLK_MPEG0, 14);
+static MESON_GATE(g12a_hiu_reg, HHI_GCLK_MPEG0, 19);
+static MESON_GATE(g12a_mipi_dsi_phy, HHI_GCLK_MPEG0, 20);
+static MESON_GATE(g12a_assist_misc, HHI_GCLK_MPEG0, 23);
+static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 24);
+static MESON_GATE(g12a_emmc_b, HHI_GCLK_MPEG0, 25);
+static MESON_GATE(g12a_emmc_c, HHI_GCLK_MPEG0, 26);
+static MESON_GATE(g12a_audio_codec, HHI_GCLK_MPEG0, 28);
+
+static MESON_GATE(g12a_audio, HHI_GCLK_MPEG1, 0);
+static MESON_GATE(g12a_eth_core, HHI_GCLK_MPEG1, 3);
+static MESON_GATE(g12a_demux, HHI_GCLK_MPEG1, 4);
+static MESON_GATE(g12a_audio_ififo, HHI_GCLK_MPEG1, 11);
+static MESON_GATE(g12a_adc, HHI_GCLK_MPEG1, 13);
+static MESON_GATE(g12a_uart1, HHI_GCLK_MPEG1, 16);
+static MESON_GATE(g12a_g2d, HHI_GCLK_MPEG1, 20);
+static MESON_GATE(g12a_reset, HHI_GCLK_MPEG1, 23);
+static MESON_GATE(g12a_pcie_comb, HHI_GCLK_MPEG1, 24);
+static MESON_GATE(g12a_parser, HHI_GCLK_MPEG1, 25);
+static MESON_GATE(g12a_usb_general, HHI_GCLK_MPEG1, 26);
+static MESON_GATE(g12a_pcie_phy, HHI_GCLK_MPEG1, 27);
+static MESON_GATE(g12a_ahb_arb0, HHI_GCLK_MPEG1, 29);
+
+static MESON_GATE(g12a_ahb_data_bus, HHI_GCLK_MPEG2, 1);
+static MESON_GATE(g12a_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
+static MESON_GATE(g12a_htx_hdcp22, HHI_GCLK_MPEG2, 3);
+static MESON_GATE(g12a_htx_pclk, HHI_GCLK_MPEG2, 4);
+static MESON_GATE(g12a_bt656, HHI_GCLK_MPEG2, 6);
+static MESON_GATE(g12a_usb1_to_ddr, HHI_GCLK_MPEG2, 8);
+static MESON_GATE(g12a_mmc_pclk, HHI_GCLK_MPEG2, 11);
+static MESON_GATE(g12a_uart2, HHI_GCLK_MPEG2, 15);
+static MESON_GATE(g12a_vpu_intr, HHI_GCLK_MPEG2, 25);
+static MESON_GATE(g12a_gic, HHI_GCLK_MPEG2, 30);
+
+static MESON_GATE(g12a_vclk2_venci0, HHI_GCLK_OTHER, 1);
+static MESON_GATE(g12a_vclk2_venci1, HHI_GCLK_OTHER, 2);
+static MESON_GATE(g12a_vclk2_vencp0, HHI_GCLK_OTHER, 3);
+static MESON_GATE(g12a_vclk2_vencp1, HHI_GCLK_OTHER, 4);
+static MESON_GATE(g12a_vclk2_venct0, HHI_GCLK_OTHER, 5);
+static MESON_GATE(g12a_vclk2_venct1, HHI_GCLK_OTHER, 6);
+static MESON_GATE(g12a_vclk2_other, HHI_GCLK_OTHER, 7);
+static MESON_GATE(g12a_vclk2_enci, HHI_GCLK_OTHER, 8);
+static MESON_GATE(g12a_vclk2_encp, HHI_GCLK_OTHER, 9);
+static MESON_GATE(g12a_dac_clk, HHI_GCLK_OTHER, 10);
+static MESON_GATE(g12a_aoclk_gate, HHI_GCLK_OTHER, 14);
+static MESON_GATE(g12a_iec958_gate, HHI_GCLK_OTHER, 16);
+static MESON_GATE(g12a_enc480p, HHI_GCLK_OTHER, 20);
+static MESON_GATE(g12a_rng1, HHI_GCLK_OTHER, 21);
+static MESON_GATE(g12a_vclk2_enct, HHI_GCLK_OTHER, 22);
+static MESON_GATE(g12a_vclk2_encl, HHI_GCLK_OTHER, 23);
+static MESON_GATE(g12a_vclk2_venclmmc, HHI_GCLK_OTHER, 24);
+static MESON_GATE(g12a_vclk2_vencl, HHI_GCLK_OTHER, 25);
+static MESON_GATE(g12a_vclk2_other1, HHI_GCLK_OTHER, 26);
+
+static MESON_GATE_RO(g12a_dma, HHI_GCLK_OTHER2, 0);
+static MESON_GATE_RO(g12a_efuse, HHI_GCLK_OTHER2, 1);
+static MESON_GATE_RO(g12a_rom_boot, HHI_GCLK_OTHER2, 2);
+static MESON_GATE_RO(g12a_reset_sec, HHI_GCLK_OTHER2, 3);
+static MESON_GATE_RO(g12a_sec_ahb_apb3, HHI_GCLK_OTHER2, 4);
+
+/* Array of all clocks provided by this provider */
+static struct clk_hw_onecell_data g12a_hw_onecell_data = {
+ .hws = {
+ [CLKID_SYS_PLL] = &g12a_sys_pll.hw,
+ [CLKID_FIXED_PLL] = &g12a_fixed_pll.hw,
+ [CLKID_FCLK_DIV2] = &g12a_fclk_div2.hw,
+ [CLKID_FCLK_DIV3] = &g12a_fclk_div3.hw,
+ [CLKID_FCLK_DIV4] = &g12a_fclk_div4.hw,
+ [CLKID_FCLK_DIV5] = &g12a_fclk_div5.hw,
+ [CLKID_FCLK_DIV7] = &g12a_fclk_div7.hw,
+ [CLKID_FCLK_DIV2P5] = &g12a_fclk_div2p5.hw,
+ [CLKID_GP0_PLL] = &g12a_gp0_pll.hw,
+ [CLKID_MPEG_SEL] = &g12a_mpeg_clk_sel.hw,
+ [CLKID_MPEG_DIV] = &g12a_mpeg_clk_div.hw,
+ [CLKID_CLK81] = &g12a_clk81.hw,
+ [CLKID_MPLL0] = &g12a_mpll0.hw,
+ [CLKID_MPLL1] = &g12a_mpll1.hw,
+ [CLKID_MPLL2] = &g12a_mpll2.hw,
+ [CLKID_MPLL3] = &g12a_mpll3.hw,
+ [CLKID_DDR] = &g12a_ddr.hw,
+ [CLKID_DOS] = &g12a_dos.hw,
+ [CLKID_AUDIO_LOCKER] = &g12a_audio_locker.hw,
+ [CLKID_MIPI_DSI_HOST] = &g12a_mipi_dsi_host.hw,
+ [CLKID_ETH_PHY] = &g12a_eth_phy.hw,
+ [CLKID_ISA] = &g12a_isa.hw,
+ [CLKID_PL301] = &g12a_pl301.hw,
+ [CLKID_PERIPHS] = &g12a_periphs.hw,
+ [CLKID_SPICC0] = &g12a_spicc_0.hw,
+ [CLKID_I2C] = &g12a_i2c.hw,
+ [CLKID_SANA] = &g12a_sana.hw,
+ [CLKID_SD] = &g12a_sd.hw,
+ [CLKID_RNG0] = &g12a_rng0.hw,
+ [CLKID_UART0] = &g12a_uart0.hw,
+ [CLKID_SPICC1] = &g12a_spicc_1.hw,
+ [CLKID_HIU_IFACE] = &g12a_hiu_reg.hw,
+ [CLKID_MIPI_DSI_PHY] = &g12a_mipi_dsi_phy.hw,
+ [CLKID_ASSIST_MISC] = &g12a_assist_misc.hw,
+ [CLKID_SD_EMMC_A] = &g12a_emmc_a.hw,
+ [CLKID_SD_EMMC_B] = &g12a_emmc_b.hw,
+ [CLKID_SD_EMMC_C] = &g12a_emmc_c.hw,
+ [CLKID_AUDIO_CODEC] = &g12a_audio_codec.hw,
+ [CLKID_AUDIO] = &g12a_audio.hw,
+ [CLKID_ETH] = &g12a_eth_core.hw,
+ [CLKID_DEMUX] = &g12a_demux.hw,
+ [CLKID_AUDIO_IFIFO] = &g12a_audio_ififo.hw,
+ [CLKID_ADC] = &g12a_adc.hw,
+ [CLKID_UART1] = &g12a_uart1.hw,
+ [CLKID_G2D] = &g12a_g2d.hw,
+ [CLKID_RESET] = &g12a_reset.hw,
+ [CLKID_PCIE_COMB] = &g12a_pcie_comb.hw,
+ [CLKID_PARSER] = &g12a_parser.hw,
+ [CLKID_USB] = &g12a_usb_general.hw,
+ [CLKID_PCIE_PHY] = &g12a_pcie_phy.hw,
+ [CLKID_AHB_ARB0] = &g12a_ahb_arb0.hw,
+ [CLKID_AHB_DATA_BUS] = &g12a_ahb_data_bus.hw,
+ [CLKID_AHB_CTRL_BUS] = &g12a_ahb_ctrl_bus.hw,
+ [CLKID_HTX_HDCP22] = &g12a_htx_hdcp22.hw,
+ [CLKID_HTX_PCLK] = &g12a_htx_pclk.hw,
+ [CLKID_BT656] = &g12a_bt656.hw,
+ [CLKID_USB1_DDR_BRIDGE] = &g12a_usb1_to_ddr.hw,
+ [CLKID_MMC_PCLK] = &g12a_mmc_pclk.hw,
+ [CLKID_UART2] = &g12a_uart2.hw,
+ [CLKID_VPU_INTR] = &g12a_vpu_intr.hw,
+ [CLKID_GIC] = &g12a_gic.hw,
+ [CLKID_SD_EMMC_A_CLK0_SEL] = &g12a_sd_emmc_a_clk0_sel.hw,
+ [CLKID_SD_EMMC_A_CLK0_DIV] = &g12a_sd_emmc_a_clk0_div.hw,
+ [CLKID_SD_EMMC_A_CLK0] = &g12a_sd_emmc_a_clk0.hw,
+ [CLKID_SD_EMMC_B_CLK0_SEL] = &g12a_sd_emmc_b_clk0_sel.hw,
+ [CLKID_SD_EMMC_B_CLK0_DIV] = &g12a_sd_emmc_b_clk0_div.hw,
+ [CLKID_SD_EMMC_B_CLK0] = &g12a_sd_emmc_b_clk0.hw,
+ [CLKID_SD_EMMC_C_CLK0_SEL] = &g12a_sd_emmc_c_clk0_sel.hw,
+ [CLKID_SD_EMMC_C_CLK0_DIV] = &g12a_sd_emmc_c_clk0_div.hw,
+ [CLKID_SD_EMMC_C_CLK0] = &g12a_sd_emmc_c_clk0.hw,
+ [CLKID_MPLL0_DIV] = &g12a_mpll0_div.hw,
+ [CLKID_MPLL1_DIV] = &g12a_mpll1_div.hw,
+ [CLKID_MPLL2_DIV] = &g12a_mpll2_div.hw,
+ [CLKID_MPLL3_DIV] = &g12a_mpll3_div.hw,
+ [CLKID_FCLK_DIV2_DIV] = &g12a_fclk_div2_div.hw,
+ [CLKID_FCLK_DIV3_DIV] = &g12a_fclk_div3_div.hw,
+ [CLKID_FCLK_DIV4_DIV] = &g12a_fclk_div4_div.hw,
+ [CLKID_FCLK_DIV5_DIV] = &g12a_fclk_div5_div.hw,
+ [CLKID_FCLK_DIV7_DIV] = &g12a_fclk_div7_div.hw,
+ [CLKID_FCLK_DIV2P5_DIV] = &g12a_fclk_div2p5_div.hw,
+ [CLKID_HIFI_PLL] = &g12a_hifi_pll.hw,
+ [CLKID_VCLK2_VENCI0] = &g12a_vclk2_venci0.hw,
+ [CLKID_VCLK2_VENCI1] = &g12a_vclk2_venci1.hw,
+ [CLKID_VCLK2_VENCP0] = &g12a_vclk2_vencp0.hw,
+ [CLKID_VCLK2_VENCP1] = &g12a_vclk2_vencp1.hw,
+ [CLKID_VCLK2_VENCT0] = &g12a_vclk2_venct0.hw,
+ [CLKID_VCLK2_VENCT1] = &g12a_vclk2_venct1.hw,
+ [CLKID_VCLK2_OTHER] = &g12a_vclk2_other.hw,
+ [CLKID_VCLK2_ENCI] = &g12a_vclk2_enci.hw,
+ [CLKID_VCLK2_ENCP] = &g12a_vclk2_encp.hw,
+ [CLKID_DAC_CLK] = &g12a_dac_clk.hw,
+ [CLKID_AOCLK] = &g12a_aoclk_gate.hw,
+ [CLKID_IEC958] = &g12a_iec958_gate.hw,
+ [CLKID_ENC480P] = &g12a_enc480p.hw,
+ [CLKID_RNG1] = &g12a_rng1.hw,
+ [CLKID_VCLK2_ENCT] = &g12a_vclk2_enct.hw,
+ [CLKID_VCLK2_ENCL] = &g12a_vclk2_encl.hw,
+ [CLKID_VCLK2_VENCLMMC] = &g12a_vclk2_venclmmc.hw,
+ [CLKID_VCLK2_VENCL] = &g12a_vclk2_vencl.hw,
+ [CLKID_VCLK2_OTHER1] = &g12a_vclk2_other1.hw,
+ [CLKID_FIXED_PLL_DCO] = &g12a_fixed_pll_dco.hw,
+ [CLKID_SYS_PLL_DCO] = &g12a_sys_pll_dco.hw,
+ [CLKID_GP0_PLL_DCO] = &g12a_gp0_pll_dco.hw,
+ [CLKID_HIFI_PLL_DCO] = &g12a_hifi_pll_dco.hw,
+ [CLKID_DMA] = &g12a_dma.hw,
+ [CLKID_EFUSE] = &g12a_efuse.hw,
+ [CLKID_ROM_BOOT] = &g12a_rom_boot.hw,
+ [CLKID_RESET_SEC] = &g12a_reset_sec.hw,
+ [CLKID_SEC_AHB_APB3] = &g12a_sec_ahb_apb3.hw,
+ [CLKID_MPLL_PREDIV] = &g12a_mpll_prediv.hw,
+ [CLKID_VPU_0_SEL] = &g12a_vpu_0_sel.hw,
+ [CLKID_VPU_0_DIV] = &g12a_vpu_0_div.hw,
+ [CLKID_VPU_0] = &g12a_vpu_0.hw,
+ [CLKID_VPU_1_SEL] = &g12a_vpu_1_sel.hw,
+ [CLKID_VPU_1_DIV] = &g12a_vpu_1_div.hw,
+ [CLKID_VPU_1] = &g12a_vpu_1.hw,
+ [CLKID_VPU] = &g12a_vpu.hw,
+ [CLKID_VAPB_0_SEL] = &g12a_vapb_0_sel.hw,
+ [CLKID_VAPB_0_DIV] = &g12a_vapb_0_div.hw,
+ [CLKID_VAPB_0] = &g12a_vapb_0.hw,
+ [CLKID_VAPB_1_SEL] = &g12a_vapb_1_sel.hw,
+ [CLKID_VAPB_1_DIV] = &g12a_vapb_1_div.hw,
+ [CLKID_VAPB_1] = &g12a_vapb_1.hw,
+ [CLKID_VAPB_SEL] = &g12a_vapb_sel.hw,
+ [CLKID_VAPB] = &g12a_vapb.hw,
+ [CLKID_HDMI_PLL_DCO] = &g12a_hdmi_pll_dco.hw,
+ [CLKID_HDMI_PLL_OD] = &g12a_hdmi_pll_od.hw,
+ [CLKID_HDMI_PLL_OD2] = &g12a_hdmi_pll_od2.hw,
+ [CLKID_HDMI_PLL] = &g12a_hdmi_pll.hw,
+ [CLKID_VID_PLL] = &g12a_vid_pll_div.hw,
+ [CLKID_VID_PLL_SEL] = &g12a_vid_pll_sel.hw,
+ [CLKID_VID_PLL_DIV] = &g12a_vid_pll.hw,
+ [CLKID_VCLK_SEL] = &g12a_vclk_sel.hw,
+ [CLKID_VCLK2_SEL] = &g12a_vclk2_sel.hw,
+ [CLKID_VCLK_INPUT] = &g12a_vclk_input.hw,
+ [CLKID_VCLK2_INPUT] = &g12a_vclk2_input.hw,
+ [CLKID_VCLK_DIV] = &g12a_vclk_div.hw,
+ [CLKID_VCLK2_DIV] = &g12a_vclk2_div.hw,
+ [CLKID_VCLK] = &g12a_vclk.hw,
+ [CLKID_VCLK2] = &g12a_vclk2.hw,
+ [CLKID_VCLK_DIV1] = &g12a_vclk_div1.hw,
+ [CLKID_VCLK_DIV2_EN] = &g12a_vclk_div2_en.hw,
+ [CLKID_VCLK_DIV4_EN] = &g12a_vclk_div4_en.hw,
+ [CLKID_VCLK_DIV6_EN] = &g12a_vclk_div6_en.hw,
+ [CLKID_VCLK_DIV12_EN] = &g12a_vclk_div12_en.hw,
+ [CLKID_VCLK2_DIV1] = &g12a_vclk2_div1.hw,
+ [CLKID_VCLK2_DIV2_EN] = &g12a_vclk2_div2_en.hw,
+ [CLKID_VCLK2_DIV4_EN] = &g12a_vclk2_div4_en.hw,
+ [CLKID_VCLK2_DIV6_EN] = &g12a_vclk2_div6_en.hw,
+ [CLKID_VCLK2_DIV12_EN] = &g12a_vclk2_div12_en.hw,
+ [CLKID_VCLK_DIV2] = &g12a_vclk_div2.hw,
+ [CLKID_VCLK_DIV4] = &g12a_vclk_div4.hw,
+ [CLKID_VCLK_DIV6] = &g12a_vclk_div6.hw,
+ [CLKID_VCLK_DIV12] = &g12a_vclk_div12.hw,
+ [CLKID_VCLK2_DIV2] = &g12a_vclk2_div2.hw,
+ [CLKID_VCLK2_DIV4] = &g12a_vclk2_div4.hw,
+ [CLKID_VCLK2_DIV6] = &g12a_vclk2_div6.hw,
+ [CLKID_VCLK2_DIV12] = &g12a_vclk2_div12.hw,
+ [CLKID_CTS_ENCI_SEL] = &g12a_cts_enci_sel.hw,
+ [CLKID_CTS_ENCP_SEL] = &g12a_cts_encp_sel.hw,
+ [CLKID_CTS_VDAC_SEL] = &g12a_cts_vdac_sel.hw,
+ [CLKID_HDMI_TX_SEL] = &g12a_hdmi_tx_sel.hw,
+ [CLKID_CTS_ENCI] = &g12a_cts_enci.hw,
+ [CLKID_CTS_ENCP] = &g12a_cts_encp.hw,
+ [CLKID_CTS_VDAC] = &g12a_cts_vdac.hw,
+ [CLKID_HDMI_TX] = &g12a_hdmi_tx.hw,
+ [CLKID_HDMI_SEL] = &g12a_hdmi_sel.hw,
+ [CLKID_HDMI_DIV] = &g12a_hdmi_div.hw,
+ [CLKID_HDMI] = &g12a_hdmi.hw,
+ [CLKID_MALI_0_SEL] = &g12a_mali_0_sel.hw,
+ [CLKID_MALI_0_DIV] = &g12a_mali_0_div.hw,
+ [CLKID_MALI_0] = &g12a_mali_0.hw,
+ [CLKID_MALI_1_SEL] = &g12a_mali_1_sel.hw,
+ [CLKID_MALI_1_DIV] = &g12a_mali_1_div.hw,
+ [CLKID_MALI_1] = &g12a_mali_1.hw,
+ [CLKID_MALI] = &g12a_mali.hw,
+ [CLKID_MPLL_5OM_DIV] = &g12a_mpll_50m_div.hw,
+ [CLKID_MPLL_5OM] = &g12a_mpll_50m.hw,
+ [NR_CLKS] = NULL,
+ },
+ .num = NR_CLKS,
+};
+
+/* Convenience table to populate regmap in .probe */
+static struct clk_regmap *const g12a_clk_regmaps[] = {
+ &g12a_clk81,
+ &g12a_dos,
+ &g12a_ddr,
+ &g12a_audio_locker,
+ &g12a_mipi_dsi_host,
+ &g12a_eth_phy,
+ &g12a_isa,
+ &g12a_pl301,
+ &g12a_periphs,
+ &g12a_spicc_0,
+ &g12a_i2c,
+ &g12a_sana,
+ &g12a_sd,
+ &g12a_rng0,
+ &g12a_uart0,
+ &g12a_spicc_1,
+ &g12a_hiu_reg,
+ &g12a_mipi_dsi_phy,
+ &g12a_assist_misc,
+ &g12a_emmc_a,
+ &g12a_emmc_b,
+ &g12a_emmc_c,
+ &g12a_audio_codec,
+ &g12a_audio,
+ &g12a_eth_core,
+ &g12a_demux,
+ &g12a_audio_ififo,
+ &g12a_adc,
+ &g12a_uart1,
+ &g12a_g2d,
+ &g12a_reset,
+ &g12a_pcie_comb,
+ &g12a_parser,
+ &g12a_usb_general,
+ &g12a_pcie_phy,
+ &g12a_ahb_arb0,
+ &g12a_ahb_data_bus,
+ &g12a_ahb_ctrl_bus,
+ &g12a_htx_hdcp22,
+ &g12a_htx_pclk,
+ &g12a_bt656,
+ &g12a_usb1_to_ddr,
+ &g12a_mmc_pclk,
+ &g12a_vpu_intr,
+ &g12a_gic,
+ &g12a_sd_emmc_a_clk0,
+ &g12a_sd_emmc_b_clk0,
+ &g12a_sd_emmc_c_clk0,
+ &g12a_mpeg_clk_div,
+ &g12a_sd_emmc_a_clk0_div,
+ &g12a_sd_emmc_b_clk0_div,
+ &g12a_sd_emmc_c_clk0_div,
+ &g12a_mpeg_clk_sel,
+ &g12a_sd_emmc_a_clk0_sel,
+ &g12a_sd_emmc_b_clk0_sel,
+ &g12a_sd_emmc_c_clk0_sel,
+ &g12a_mpll0,
+ &g12a_mpll1,
+ &g12a_mpll2,
+ &g12a_mpll3,
+ &g12a_mpll0_div,
+ &g12a_mpll1_div,
+ &g12a_mpll2_div,
+ &g12a_mpll3_div,
+ &g12a_fixed_pll,
+ &g12a_sys_pll,
+ &g12a_gp0_pll,
+ &g12a_hifi_pll,
+ &g12a_vclk2_venci0,
+ &g12a_vclk2_venci1,
+ &g12a_vclk2_vencp0,
+ &g12a_vclk2_vencp1,
+ &g12a_vclk2_venct0,
+ &g12a_vclk2_venct1,
+ &g12a_vclk2_other,
+ &g12a_vclk2_enci,
+ &g12a_vclk2_encp,
+ &g12a_dac_clk,
+ &g12a_aoclk_gate,
+ &g12a_iec958_gate,
+ &g12a_enc480p,
+ &g12a_rng1,
+ &g12a_vclk2_enct,
+ &g12a_vclk2_encl,
+ &g12a_vclk2_venclmmc,
+ &g12a_vclk2_vencl,
+ &g12a_vclk2_other1,
+ &g12a_fixed_pll_dco,
+ &g12a_sys_pll_dco,
+ &g12a_gp0_pll_dco,
+ &g12a_hifi_pll_dco,
+ &g12a_fclk_div2,
+ &g12a_fclk_div3,
+ &g12a_fclk_div4,
+ &g12a_fclk_div5,
+ &g12a_fclk_div7,
+ &g12a_fclk_div2p5,
+ &g12a_dma,
+ &g12a_efuse,
+ &g12a_rom_boot,
+ &g12a_reset_sec,
+ &g12a_sec_ahb_apb3,
+ &g12a_vpu_0_sel,
+ &g12a_vpu_0_div,
+ &g12a_vpu_0,
+ &g12a_vpu_1_sel,
+ &g12a_vpu_1_div,
+ &g12a_vpu_1,
+ &g12a_vpu,
+ &g12a_vapb_0_sel,
+ &g12a_vapb_0_div,
+ &g12a_vapb_0,
+ &g12a_vapb_1_sel,
+ &g12a_vapb_1_div,
+ &g12a_vapb_1,
+ &g12a_vapb_sel,
+ &g12a_vapb,
+ &g12a_hdmi_pll_dco,
+ &g12a_hdmi_pll_od,
+ &g12a_hdmi_pll_od2,
+ &g12a_hdmi_pll,
+ &g12a_vid_pll_div,
+ &g12a_vid_pll_sel,
+ &g12a_vid_pll,
+ &g12a_vclk_sel,
+ &g12a_vclk2_sel,
+ &g12a_vclk_input,
+ &g12a_vclk2_input,
+ &g12a_vclk_div,
+ &g12a_vclk2_div,
+ &g12a_vclk,
+ &g12a_vclk2,
+ &g12a_vclk_div1,
+ &g12a_vclk_div2_en,
+ &g12a_vclk_div4_en,
+ &g12a_vclk_div6_en,
+ &g12a_vclk_div12_en,
+ &g12a_vclk2_div1,
+ &g12a_vclk2_div2_en,
+ &g12a_vclk2_div4_en,
+ &g12a_vclk2_div6_en,
+ &g12a_vclk2_div12_en,
+ &g12a_cts_enci_sel,
+ &g12a_cts_encp_sel,
+ &g12a_cts_vdac_sel,
+ &g12a_hdmi_tx_sel,
+ &g12a_cts_enci,
+ &g12a_cts_encp,
+ &g12a_cts_vdac,
+ &g12a_hdmi_tx,
+ &g12a_hdmi_sel,
+ &g12a_hdmi_div,
+ &g12a_hdmi,
+ &g12a_mali_0_sel,
+ &g12a_mali_0_div,
+ &g12a_mali_0,
+ &g12a_mali_1_sel,
+ &g12a_mali_1_div,
+ &g12a_mali_1,
+ &g12a_mali,
+ &g12a_mpll_50m,
+};
+
+static const struct meson_eeclkc_data g12a_clkc_data = {
+ .regmap_clks = g12a_clk_regmaps,
+ .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
+ .hw_onecell_data = &g12a_hw_onecell_data
+};
+
+static const struct of_device_id clkc_match_table[] = {
+ { .compatible = "amlogic,g12a-clkc", .data = &g12a_clkc_data },
+ {}
+};
+
+static struct platform_driver g12a_driver = {
+ .probe = meson_eeclkc_probe,
+ .driver = {
+ .name = "g12a-clkc",
+ .of_match_table = clkc_match_table,
+ },
+};
+
+builtin_platform_driver(g12a_driver);
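
Probing is delegated to meson_eeclkc_probe(), which lives in meson-eeclk.c and is not part of this excerpt. Going by the gxbb_clkc_probe() code this series removes further down, the helper presumably does something along these lines with g12a_clkc_data (a sketch only; the _sketch suffix marks it as such):

static int meson_eeclkc_probe_sketch(struct platform_device *pdev)
{
        const struct meson_eeclkc_data *data =
                of_device_get_match_data(&pdev->dev);
        struct regmap *map;
        int i, ret;

        if (!data)
                return -EINVAL;

        /* The clock controller node sits below the HHI system controller. */
        map = syscon_node_to_regmap(of_get_parent(pdev->dev.of_node));
        if (IS_ERR(map))
                return PTR_ERR(map);

        /* Point every regmap-backed clock at the shared HHI regmap. */
        for (i = 0; i < data->regmap_clk_num; i++)
                data->regmap_clks[i]->map = map;

        /* Register the exposed hws; the onecell array may be sparse. */
        for (i = 0; i < data->hw_onecell_data->num; i++) {
                if (!data->hw_onecell_data->hws[i])
                        continue;

                ret = devm_clk_hw_register(&pdev->dev,
                                           data->hw_onecell_data->hws[i]);
                if (ret)
                        return ret;
        }

        return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
                                           data->hw_onecell_data);
}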
diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h
new file mode 100644
index 000000000000..f399dfe1401c
--- /dev/null
+++ b/drivers/clk/meson/g12a.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2016 Amlogic, Inc.
+ * Author: Michael Turquette <mturquette@baylibre.com>
+ *
+ * Copyright (c) 2018 Amlogic, inc.
+ * Author: Qiufang Dai <qiufang.dai@amlogic.com>
+ * Author: Jian Hu <jian.hu@amlogic.com>
+ *
+ */
+#ifndef __G12A_H
+#define __G12A_H
+
+/*
+ * Clock controller register offsets
+ *
+ * Register offsets from the data sheet must be multiplied by 4 before
+ * adding them to the base address to get the right value.
+ */
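
A quick check of the rule above against one of the defines below: if the data sheet lists HHI_GCLK_MPEG0 at word offset 0x50, multiplying by 4 gives the 0x140 byte offset used here. A hypothetical helper (not used by the driver) would simply be:

#define HHI_BYTE_OFFSET(datasheet_word_offset)  ((datasheet_word_offset) * 4)
/* HHI_BYTE_OFFSET(0x50) == 0x140 == HHI_GCLK_MPEG0 */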
+#define HHI_MIPI_CNTL0 0x000
+#define HHI_MIPI_CNTL1 0x004
+#define HHI_MIPI_CNTL2 0x008
+#define HHI_MIPI_STS 0x00C
+#define HHI_GP0_PLL_CNTL0 0x040
+#define HHI_GP0_PLL_CNTL1 0x044
+#define HHI_GP0_PLL_CNTL2 0x048
+#define HHI_GP0_PLL_CNTL3 0x04C
+#define HHI_GP0_PLL_CNTL4 0x050
+#define HHI_GP0_PLL_CNTL5 0x054
+#define HHI_GP0_PLL_CNTL6 0x058
+#define HHI_GP0_PLL_STS 0x05C
+#define HHI_PCIE_PLL_CNTL0 0x098
+#define HHI_PCIE_PLL_CNTL1 0x09C
+#define HHI_PCIE_PLL_CNTL2 0x0A0
+#define HHI_PCIE_PLL_CNTL3 0x0A4
+#define HHI_PCIE_PLL_CNTL4 0x0A8
+#define HHI_PCIE_PLL_CNTL5 0x0AC
+#define HHI_PCIE_PLL_STS 0x0B8
+#define HHI_HIFI_PLL_CNTL0 0x0D8
+#define HHI_HIFI_PLL_CNTL1 0x0DC
+#define HHI_HIFI_PLL_CNTL2 0x0E0
+#define HHI_HIFI_PLL_CNTL3 0x0E4
+#define HHI_HIFI_PLL_CNTL4 0x0E8
+#define HHI_HIFI_PLL_CNTL5 0x0EC
+#define HHI_HIFI_PLL_CNTL6 0x0F0
+#define HHI_VIID_CLK_DIV 0x128
+#define HHI_VIID_CLK_CNTL 0x12C
+#define HHI_GCLK_MPEG0 0x140
+#define HHI_GCLK_MPEG1 0x144
+#define HHI_GCLK_MPEG2 0x148
+#define HHI_GCLK_OTHER 0x150
+#define HHI_GCLK_OTHER2 0x154
+#define HHI_VID_CLK_DIV 0x164
+#define HHI_MPEG_CLK_CNTL 0x174
+#define HHI_AUD_CLK_CNTL 0x178
+#define HHI_VID_CLK_CNTL 0x17c
+#define HHI_TS_CLK_CNTL 0x190
+#define HHI_VID_CLK_CNTL2 0x194
+#define HHI_SYS_CPU_CLK_CNTL0 0x19c
+#define HHI_VID_PLL_CLK_DIV 0x1A0
+#define HHI_MALI_CLK_CNTL 0x1b0
+#define HHI_VPU_CLKC_CNTL 0x1b4
+#define HHI_VPU_CLK_CNTL 0x1bC
+#define HHI_HDMI_CLK_CNTL 0x1CC
+#define HHI_VDEC_CLK_CNTL 0x1E0
+#define HHI_VDEC2_CLK_CNTL 0x1E4
+#define HHI_VDEC3_CLK_CNTL 0x1E8
+#define HHI_VDEC4_CLK_CNTL 0x1EC
+#define HHI_HDCP22_CLK_CNTL 0x1F0
+#define HHI_VAPBCLK_CNTL 0x1F4
+#define HHI_VPU_CLKB_CNTL 0x20C
+#define HHI_GEN_CLK_CNTL 0x228
+#define HHI_VDIN_MEAS_CLK_CNTL 0x250
+#define HHI_MIPIDSI_PHY_CLK_CNTL 0x254
+#define HHI_NAND_CLK_CNTL 0x25C
+#define HHI_SD_EMMC_CLK_CNTL 0x264
+#define HHI_MPLL_CNTL0 0x278
+#define HHI_MPLL_CNTL1 0x27C
+#define HHI_MPLL_CNTL2 0x280
+#define HHI_MPLL_CNTL3 0x284
+#define HHI_MPLL_CNTL4 0x288
+#define HHI_MPLL_CNTL5 0x28c
+#define HHI_MPLL_CNTL6 0x290
+#define HHI_MPLL_CNTL7 0x294
+#define HHI_MPLL_CNTL8 0x298
+#define HHI_FIX_PLL_CNTL0 0x2A0
+#define HHI_FIX_PLL_CNTL1 0x2A4
+#define HHI_FIX_PLL_CNTL3 0x2AC
+#define HHI_SYS_PLL_CNTL0 0x2f4
+#define HHI_SYS_PLL_CNTL1 0x2f8
+#define HHI_SYS_PLL_CNTL2 0x2fc
+#define HHI_SYS_PLL_CNTL3 0x300
+#define HHI_SYS_PLL_CNTL4 0x304
+#define HHI_SYS_PLL_CNTL5 0x308
+#define HHI_SYS_PLL_CNTL6 0x30c
+#define HHI_HDMI_PLL_CNTL0 0x320
+#define HHI_HDMI_PLL_CNTL1 0x324
+#define HHI_HDMI_PLL_CNTL2 0x328
+#define HHI_HDMI_PLL_CNTL3 0x32c
+#define HHI_HDMI_PLL_CNTL4 0x330
+#define HHI_HDMI_PLL_CNTL5 0x334
+#define HHI_HDMI_PLL_CNTL6 0x338
+#define HHI_SPICC_CLK_CNTL 0x3dc
+
+/*
+ * CLKID index values
+ *
+ * These indices are entirely contrived and do not map onto the hardware.
+ * It has now been decided to expose everything by default in the DT header:
+ * include/dt-bindings/clock/g12a-clkc.h. Only the clock ids we don't want
+ * to expose, such as the internal muxes and dividers of composite clocks,
+ * will remain defined here.
+ */
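
As an example of the split: the composite CLKID_MALI used in the hws table comes from the dt-bindings header included at the bottom of this file, so a board device tree can reference it (e.g. "clocks = <&clkc CLKID_MALI>;", the &clkc label being hypothetical), while the internal CLKID_MALI_0_DIV (170) and CLKID_MALI_1_DIV (173) defined below stay private to the driver and are never visible to DT.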
+#define CLKID_MPEG_SEL 8
+#define CLKID_MPEG_DIV 9
+#define CLKID_SD_EMMC_A_CLK0_SEL 63
+#define CLKID_SD_EMMC_A_CLK0_DIV 64
+#define CLKID_SD_EMMC_B_CLK0_SEL 65
+#define CLKID_SD_EMMC_B_CLK0_DIV 66
+#define CLKID_SD_EMMC_C_CLK0_SEL 67
+#define CLKID_SD_EMMC_C_CLK0_DIV 68
+#define CLKID_MPLL0_DIV 69
+#define CLKID_MPLL1_DIV 70
+#define CLKID_MPLL2_DIV 71
+#define CLKID_MPLL3_DIV 72
+#define CLKID_MPLL_PREDIV 73
+#define CLKID_FCLK_DIV2_DIV 75
+#define CLKID_FCLK_DIV3_DIV 76
+#define CLKID_FCLK_DIV4_DIV 77
+#define CLKID_FCLK_DIV5_DIV 78
+#define CLKID_FCLK_DIV7_DIV 79
+#define CLKID_FCLK_DIV2P5_DIV 100
+#define CLKID_FIXED_PLL_DCO 101
+#define CLKID_SYS_PLL_DCO 102
+#define CLKID_GP0_PLL_DCO 103
+#define CLKID_HIFI_PLL_DCO 104
+#define CLKID_VPU_0_DIV 111
+#define CLKID_VPU_1_DIV 114
+#define CLKID_VAPB_0_DIV 118
+#define CLKID_VAPB_1_DIV 121
+#define CLKID_HDMI_PLL_DCO 125
+#define CLKID_HDMI_PLL_OD 126
+#define CLKID_HDMI_PLL_OD2 127
+#define CLKID_VID_PLL_SEL 130
+#define CLKID_VID_PLL_DIV 131
+#define CLKID_VCLK_SEL 132
+#define CLKID_VCLK2_SEL 133
+#define CLKID_VCLK_INPUT 134
+#define CLKID_VCLK2_INPUT 135
+#define CLKID_VCLK_DIV 136
+#define CLKID_VCLK2_DIV 137
+#define CLKID_VCLK_DIV2_EN 140
+#define CLKID_VCLK_DIV4_EN 141
+#define CLKID_VCLK_DIV6_EN 142
+#define CLKID_VCLK_DIV12_EN 143
+#define CLKID_VCLK2_DIV2_EN 144
+#define CLKID_VCLK2_DIV4_EN 145
+#define CLKID_VCLK2_DIV6_EN 146
+#define CLKID_VCLK2_DIV12_EN 147
+#define CLKID_CTS_ENCI_SEL 158
+#define CLKID_CTS_ENCP_SEL 159
+#define CLKID_CTS_VDAC_SEL 160
+#define CLKID_HDMI_TX_SEL 161
+#define CLKID_HDMI_SEL 166
+#define CLKID_HDMI_DIV 167
+#define CLKID_MALI_0_DIV 170
+#define CLKID_MALI_1_DIV 173
+#define CLKID_MPLL_5OM_DIV 176
+
+#define NR_CLKS 178
+
+/* include the CLKIDs that have been made part of the DT binding */
+#include <dt-bindings/clock/g12a-clkc.h>
+
+#endif /* __G12A_H */
diff --git a/drivers/clk/meson/gxbb-aoclk-32k.c b/drivers/clk/meson/gxbb-aoclk-32k.c
deleted file mode 100644
index 680467141a1d..000000000000
--- a/drivers/clk/meson/gxbb-aoclk-32k.c
+++ /dev/null
@@ -1,193 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (c) 2017 BayLibre, SAS.
- * Author: Neil Armstrong <narmstrong@baylibre.com>
- */
-
-#include <linux/clk-provider.h>
-#include <linux/bitfield.h>
-#include <linux/regmap.h>
-#include "gxbb-aoclk.h"
-
-/*
- * The AO Domain embeds a dual/divider to generate a more precise
- * 32,768KHz clock for low-power suspend mode and CEC.
- * ______ ______
- * | | | |
- * ______ | Div1 |-| Cnt1 | ______
- * | | /|______| |______|\ | |
- * Xtal-->| Gate |---| ______ ______ X-X--| Gate |-->
- * |______| | \| | | |/ | |______|
- * | | Div2 |-| Cnt2 | |
- * | |______| |______| |
- * |_______________________|
- *
- * The dividing can be switched to single or dual, with a counter
- * for each divider to set when the switching is done.
- * The entire dividing mechanism can be also bypassed.
- */
-
-#define CLK_CNTL0_N1_MASK GENMASK(11, 0)
-#define CLK_CNTL0_N2_MASK GENMASK(23, 12)
-#define CLK_CNTL0_DUALDIV_EN BIT(28)
-#define CLK_CNTL0_OUT_GATE_EN BIT(30)
-#define CLK_CNTL0_IN_GATE_EN BIT(31)
-
-#define CLK_CNTL1_M1_MASK GENMASK(11, 0)
-#define CLK_CNTL1_M2_MASK GENMASK(23, 12)
-#define CLK_CNTL1_BYPASS_EN BIT(24)
-#define CLK_CNTL1_SELECT_OSC BIT(27)
-
-#define PWR_CNTL_ALT_32K_SEL GENMASK(13, 10)
-
-struct cec_32k_freq_table {
- unsigned long parent_rate;
- unsigned long target_rate;
- bool dualdiv;
- unsigned int n1;
- unsigned int n2;
- unsigned int m1;
- unsigned int m2;
-};
-
-static const struct cec_32k_freq_table aoclk_cec_32k_table[] = {
- [0] = {
- .parent_rate = 24000000,
- .target_rate = 32768,
- .dualdiv = true,
- .n1 = 733,
- .n2 = 732,
- .m1 = 8,
- .m2 = 11,
- },
-};
-
-/*
- * If CLK_CNTL0_DUALDIV_EN == 0
- * - will use N1 divider only
- * If CLK_CNTL0_DUALDIV_EN == 1
- * - hold M1 cycles of N1 divider then changes to N2
- * - hold M2 cycles of N2 divider then changes to N1
- * Then we can get more accurate division.
- */
-static unsigned long aoclk_cec_32k_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct aoclk_cec_32k *cec_32k = to_aoclk_cec_32k(hw);
- unsigned long n1;
- u32 reg0, reg1;
-
- regmap_read(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0, &reg0);
- regmap_read(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL1, &reg1);
-
- if (reg1 & CLK_CNTL1_BYPASS_EN)
- return parent_rate;
-
- if (reg0 & CLK_CNTL0_DUALDIV_EN) {
- unsigned long n2, m1, m2, f1, f2, p1, p2;
-
- n1 = FIELD_GET(CLK_CNTL0_N1_MASK, reg0) + 1;
- n2 = FIELD_GET(CLK_CNTL0_N2_MASK, reg0) + 1;
-
- m1 = FIELD_GET(CLK_CNTL1_M1_MASK, reg1) + 1;
- m2 = FIELD_GET(CLK_CNTL1_M2_MASK, reg1) + 1;
-
- f1 = DIV_ROUND_CLOSEST(parent_rate, n1);
- f2 = DIV_ROUND_CLOSEST(parent_rate, n2);
-
- p1 = DIV_ROUND_CLOSEST(100000000 * m1, f1 * (m1 + m2));
- p2 = DIV_ROUND_CLOSEST(100000000 * m2, f2 * (m1 + m2));
-
- return DIV_ROUND_UP(100000000, p1 + p2);
- }
-
- n1 = FIELD_GET(CLK_CNTL0_N1_MASK, reg0) + 1;
-
- return DIV_ROUND_CLOSEST(parent_rate, n1);
-}
-
-static const struct cec_32k_freq_table *find_cec_32k_freq(unsigned long rate,
- unsigned long prate)
-{
- int i;
-
- for (i = 0 ; i < ARRAY_SIZE(aoclk_cec_32k_table) ; ++i)
- if (aoclk_cec_32k_table[i].parent_rate == prate &&
- aoclk_cec_32k_table[i].target_rate == rate)
- return &aoclk_cec_32k_table[i];
-
- return NULL;
-}
-
-static long aoclk_cec_32k_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- const struct cec_32k_freq_table *freq = find_cec_32k_freq(rate,
- *prate);
-
- /* If invalid return first one */
- if (!freq)
- return aoclk_cec_32k_table[0].target_rate;
-
- return freq->target_rate;
-}
-
-/*
- * From the Amlogic init procedure, the IN and OUT gates needs to be handled
- * in the init procedure to avoid any glitches.
- */
-
-static int aoclk_cec_32k_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- const struct cec_32k_freq_table *freq = find_cec_32k_freq(rate,
- parent_rate);
- struct aoclk_cec_32k *cec_32k = to_aoclk_cec_32k(hw);
- u32 reg = 0;
-
- if (!freq)
- return -EINVAL;
-
- /* Disable clock */
- regmap_update_bits(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0,
- CLK_CNTL0_IN_GATE_EN | CLK_CNTL0_OUT_GATE_EN, 0);
-
- reg = FIELD_PREP(CLK_CNTL0_N1_MASK, freq->n1 - 1);
- if (freq->dualdiv)
- reg |= CLK_CNTL0_DUALDIV_EN |
- FIELD_PREP(CLK_CNTL0_N2_MASK, freq->n2 - 1);
-
- regmap_write(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0, reg);
-
- reg = FIELD_PREP(CLK_CNTL1_M1_MASK, freq->m1 - 1);
- if (freq->dualdiv)
- reg |= FIELD_PREP(CLK_CNTL1_M2_MASK, freq->m2 - 1);
-
- regmap_write(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL1, reg);
-
- /* Enable clock */
- regmap_update_bits(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0,
- CLK_CNTL0_IN_GATE_EN, CLK_CNTL0_IN_GATE_EN);
-
- udelay(200);
-
- regmap_update_bits(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0,
- CLK_CNTL0_OUT_GATE_EN, CLK_CNTL0_OUT_GATE_EN);
-
- regmap_update_bits(cec_32k->regmap, AO_CRT_CLK_CNTL1,
- CLK_CNTL1_SELECT_OSC, CLK_CNTL1_SELECT_OSC);
-
- /* Select 32k from XTAL */
- regmap_update_bits(cec_32k->regmap,
- AO_RTI_PWR_CNTL_REG0,
- PWR_CNTL_ALT_32K_SEL,
- FIELD_PREP(PWR_CNTL_ALT_32K_SEL, 4));
-
- return 0;
-}
-
-const struct clk_ops meson_aoclk_cec_32k_ops = {
- .recalc_rate = aoclk_cec_32k_recalc_rate,
- .round_rate = aoclk_cec_32k_round_rate,
- .set_rate = aoclk_cec_32k_set_rate,
-};
diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c
index 42ed61d3c3fb..449f6ac189d8 100644
--- a/drivers/clk/meson/gxbb-aoclk.c
+++ b/drivers/clk/meson/gxbb-aoclk.c
@@ -5,10 +5,23 @@
*/
#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>
-#include "clk-regmap.h"
#include "meson-aoclk.h"
#include "gxbb-aoclk.h"
+#include "clk-regmap.h"
+#include "clk-dualdiv.h"
+
+#define IN_PREFIX "ao-in-"
+
+/* AO Configuration Clock registers offsets */
+#define AO_RTI_PWR_CNTL_REG1 0x0c
+#define AO_RTI_PWR_CNTL_REG0 0x10
+#define AO_RTI_GEN_CNTL_REG0 0x40
+#define AO_OSCIN_CNTL 0x58
+#define AO_CRT_CLK_CNTL1 0x68
+#define AO_RTC_ALT_CLK_CNTL0 0x94
+#define AO_RTC_ALT_CLK_CNTL1 0x98
+
#define GXBB_AO_GATE(_name, _bit) \
static struct clk_regmap _name##_ao = { \
.data = &(struct clk_regmap_gate_data) { \
@@ -18,7 +31,7 @@ static struct clk_regmap _name##_ao = { \
.hw.init = &(struct clk_init_data) { \
.name = #_name "_ao", \
.ops = &clk_regmap_gate_ops, \
- .parent_names = (const char *[]){ "clk81" }, \
+ .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk" }, \
.num_parents = 1, \
.flags = CLK_IGNORE_UNUSED, \
}, \
@@ -31,13 +44,174 @@ GXBB_AO_GATE(uart1, 3);
GXBB_AO_GATE(uart2, 5);
GXBB_AO_GATE(ir_blaster, 6);
-static struct aoclk_cec_32k cec_32k_ao = {
- .hw.init = &(struct clk_init_data) {
- .name = "cec_32k_ao",
- .ops = &meson_aoclk_cec_32k_ops,
- .parent_names = (const char *[]){ "xtal" },
+static struct clk_regmap ao_cts_oscin = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AO_RTI_PWR_CNTL_REG0,
+ .bit_idx = 6,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "ao_cts_oscin",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap ao_32k_pre = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AO_RTC_ALT_CLK_CNTL0,
+ .bit_idx = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "ao_32k_pre",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "ao_cts_oscin" },
+ .num_parents = 1,
+ },
+};
+
+static const struct meson_clk_dualdiv_param gxbb_32k_div_table[] = {
+ {
+ .dual = 1,
+ .n1 = 733,
+ .m1 = 8,
+ .n2 = 732,
+ .m2 = 11,
+ }, {}
+};
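
These are the same divider values the removed gxbb-aoclk-32k.c table carried. Assuming the dualdiv ops implement the weighted-average formula implied by the recalc_rate() code deleted above, the single table entry lands on the 32.768 kHz target from the 24 MHz crystal:

rate = parent * (m1 + m2) / (n1 * m1 + n2 * m2)
     = 24000000 * (8 + 11) / (733 * 8 + 732 * 11)
     = 456000000 / 13916
     ≈ 32768 Hz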
+
+static struct clk_regmap ao_32k_div = {
+ .data = &(struct meson_clk_dualdiv_data){
+ .n1 = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL0,
+ .shift = 0,
+ .width = 12,
+ },
+ .n2 = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL0,
+ .shift = 12,
+ .width = 12,
+ },
+ .m1 = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL1,
+ .shift = 0,
+ .width = 12,
+ },
+ .m2 = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL1,
+ .shift = 12,
+ .width = 12,
+ },
+ .dual = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .table = gxbb_32k_div_table,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "ao_32k_div",
+ .ops = &meson_clk_dualdiv_ops,
+ .parent_names = (const char *[]){ "ao_32k_pre" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap ao_32k_sel = {
+ .data = &(struct clk_regmap_mux_data) {
+ .offset = AO_RTC_ALT_CLK_CNTL1,
+ .mask = 0x1,
+ .shift = 24,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "ao_32k_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "ao_32k_div",
+ "ao_32k_pre" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap ao_32k = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AO_RTC_ALT_CLK_CNTL0,
+ .bit_idx = 30,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "ao_32k",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "ao_32k_sel" },
.num_parents = 1,
- .flags = CLK_IGNORE_UNUSED,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap ao_cts_rtc_oscin = {
+ .data = &(struct clk_regmap_mux_data) {
+ .offset = AO_RTI_PWR_CNTL_REG0,
+ .mask = 0x7,
+ .shift = 10,
+ .table = (u32[]){ 1, 2, 3, 4 },
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "ao_cts_rtc_oscin",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "ext-32k-0",
+ IN_PREFIX "ext-32k-1",
+ IN_PREFIX "ext-32k-2",
+ "ao_32k" },
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap ao_clk81 = {
+ .data = &(struct clk_regmap_mux_data) {
+ .offset = AO_RTI_PWR_CNTL_REG0,
+ .mask = 0x1,
+ .shift = 0,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "ao_clk81",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk",
+ "ao_cts_rtc_oscin" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap ao_cts_cec = {
+ .data = &(struct clk_regmap_mux_data) {
+ .offset = AO_CRT_CLK_CNTL1,
+ .mask = 0x1,
+ .shift = 27,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "ao_cts_cec",
+ .ops = &clk_regmap_mux_ops,
+ /*
+ * FIXME: The 'fixme' parent obviously does not exist.
+ *
+ * ATM, CCF won't call get_parent() if num_parents is 1. It
+ * does not allow NULL as a parent name either.
+ *
+ * On this particular mux, we only know the input #1 parent
+ * but, on boot, unknown input #0 is set, so it is critical
+ * to call .get_parent() on it
+ *
+ * Until CCF gets fixed, adding this fake parent that won't
+ * ever be registered should work around the problem
+ */
+ .parent_names = (const char *[]){ "fixme",
+ "ao_cts_rtc_oscin" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -50,13 +224,21 @@ static const unsigned int gxbb_aoclk_reset[] = {
[RESET_AO_IR_BLASTER] = 23,
};
-static struct clk_regmap *gxbb_aoclk_gate[] = {
- [CLKID_AO_REMOTE] = &remote_ao,
- [CLKID_AO_I2C_MASTER] = &i2c_master_ao,
- [CLKID_AO_I2C_SLAVE] = &i2c_slave_ao,
- [CLKID_AO_UART1] = &uart1_ao,
- [CLKID_AO_UART2] = &uart2_ao,
- [CLKID_AO_IR_BLASTER] = &ir_blaster_ao,
+static struct clk_regmap *gxbb_aoclk[] = {
+ &remote_ao,
+ &i2c_master_ao,
+ &i2c_slave_ao,
+ &uart1_ao,
+ &uart2_ao,
+ &ir_blaster_ao,
+ &ao_cts_oscin,
+ &ao_32k_pre,
+ &ao_32k_div,
+ &ao_32k_sel,
+ &ao_32k,
+ &ao_cts_rtc_oscin,
+ &ao_clk81,
+ &ao_cts_cec,
};
static const struct clk_hw_onecell_data gxbb_aoclk_onecell_data = {
@@ -67,52 +249,38 @@ static const struct clk_hw_onecell_data gxbb_aoclk_onecell_data = {
[CLKID_AO_UART1] = &uart1_ao.hw,
[CLKID_AO_UART2] = &uart2_ao.hw,
[CLKID_AO_IR_BLASTER] = &ir_blaster_ao.hw,
- [CLKID_AO_CEC_32K] = &cec_32k_ao.hw,
+ [CLKID_AO_CEC_32K] = &ao_cts_cec.hw,
+ [CLKID_AO_CTS_OSCIN] = &ao_cts_oscin.hw,
+ [CLKID_AO_32K_PRE] = &ao_32k_pre.hw,
+ [CLKID_AO_32K_DIV] = &ao_32k_div.hw,
+ [CLKID_AO_32K_SEL] = &ao_32k_sel.hw,
+ [CLKID_AO_32K] = &ao_32k.hw,
+ [CLKID_AO_CTS_RTC_OSCIN] = &ao_cts_rtc_oscin.hw,
+ [CLKID_AO_CLK81] = &ao_clk81.hw,
},
.num = NR_CLKS,
};
-static int gxbb_register_cec_ao_32k(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct regmap *regmap;
- int ret;
-
- regmap = syscon_node_to_regmap(of_get_parent(dev->of_node));
- if (IS_ERR(regmap)) {
- dev_err(dev, "failed to get regmap\n");
- return PTR_ERR(regmap);
- }
-
- /* Specific clocks */
- cec_32k_ao.regmap = regmap;
- ret = devm_clk_hw_register(dev, &cec_32k_ao.hw);
- if (ret) {
- dev_err(&pdev->dev, "clk cec_32k_ao register failed.\n");
- return ret;
- }
-
- return 0;
-}
+static const struct meson_aoclk_input gxbb_aoclk_inputs[] = {
+ { .name = "xtal", .required = true, },
+ { .name = "mpeg-clk", .required = true, },
+ {. name = "ext-32k-0", .required = false, },
+ {. name = "ext-32k-1", .required = false, },
+ {. name = "ext-32k-2", .required = false, },
+};
static const struct meson_aoclk_data gxbb_aoclkc_data = {
.reset_reg = AO_RTI_GEN_CNTL_REG0,
.num_reset = ARRAY_SIZE(gxbb_aoclk_reset),
.reset = gxbb_aoclk_reset,
- .num_clks = ARRAY_SIZE(gxbb_aoclk_gate),
- .clks = gxbb_aoclk_gate,
+ .num_clks = ARRAY_SIZE(gxbb_aoclk),
+ .clks = gxbb_aoclk,
.hw_data = &gxbb_aoclk_onecell_data,
+ .inputs = gxbb_aoclk_inputs,
+ .num_inputs = ARRAY_SIZE(gxbb_aoclk_inputs),
+ .input_prefix = IN_PREFIX,
};
-static int gxbb_aoclkc_probe(struct platform_device *pdev)
-{
- int ret = gxbb_register_cec_ao_32k(pdev);
- if (ret)
- return ret;
-
- return meson_aoclkc_probe(pdev);
-}
-
static const struct of_device_id gxbb_aoclkc_match_table[] = {
{
.compatible = "amlogic,meson-gx-aoclkc",
@@ -122,7 +290,7 @@ static const struct of_device_id gxbb_aoclkc_match_table[] = {
};
static struct platform_driver gxbb_aoclkc_driver = {
- .probe = gxbb_aoclkc_probe,
+ .probe = meson_aoclkc_probe,
.driver = {
.name = "gxbb-aoclkc",
.of_match_table = gxbb_aoclkc_match_table,
diff --git a/drivers/clk/meson/gxbb-aoclk.h b/drivers/clk/meson/gxbb-aoclk.h
index c514493d989a..1db16f9b37d4 100644
--- a/drivers/clk/meson/gxbb-aoclk.h
+++ b/drivers/clk/meson/gxbb-aoclk.h
@@ -7,25 +7,7 @@
#ifndef __GXBB_AOCLKC_H
#define __GXBB_AOCLKC_H
-#define NR_CLKS 7
-
-/* AO Configuration Clock registers offsets */
-#define AO_RTI_PWR_CNTL_REG1 0x0c
-#define AO_RTI_PWR_CNTL_REG0 0x10
-#define AO_RTI_GEN_CNTL_REG0 0x40
-#define AO_OSCIN_CNTL 0x58
-#define AO_CRT_CLK_CNTL1 0x68
-#define AO_RTC_ALT_CLK_CNTL0 0x94
-#define AO_RTC_ALT_CLK_CNTL1 0x98
-
-struct aoclk_cec_32k {
- struct clk_hw hw;
- struct regmap *regmap;
-};
-
-#define to_aoclk_cec_32k(_hw) container_of(_hw, struct aoclk_cec_32k, hw)
-
-extern const struct clk_ops meson_aoclk_cec_32k_ops;
+#define NR_CLKS 14
#include <dt-bindings/clock/gxbb-aoclkc.h>
#include <dt-bindings/reset/gxbb-aoclkc.h>
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 65f2599e5243..04df2e208ed6 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -4,17 +4,20 @@
* Michael Turquette <mturquette@baylibre.com>
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/init.h>
#include <linux/of_device.h>
-#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include "clkc.h"
#include "gxbb.h"
+#include "clk-input.h"
#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-mpll.h"
+#include "meson-eeclk.h"
+#include "vid-pll-div.h"
+
+#define IN_PREFIX "ee-in-"
static DEFINE_SPINLOCK(meson_clk_lock);
@@ -118,7 +121,7 @@ static struct clk_regmap gxbb_fixed_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "fixed_pll_dco",
.ops = &meson_clk_pll_ro_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
.num_parents = 1,
},
};
@@ -148,7 +151,7 @@ static struct clk_fixed_factor gxbb_hdmi_pll_pre_mult = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll_pre_mult",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
.num_parents = 1,
},
};
@@ -241,7 +244,7 @@ static struct clk_regmap gxl_hdmi_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll_dco",
.ops = &meson_clk_pll_ro_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
.num_parents = 1,
/*
* Display directly handle hdmi pll registers ATM, we need
@@ -378,7 +381,7 @@ static struct clk_regmap gxbb_sys_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "sys_pll_dco",
.ops = &meson_clk_pll_ro_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
.num_parents = 1,
},
};
@@ -439,7 +442,7 @@ static struct clk_regmap gxbb_gp0_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "gp0_pll_dco",
.ops = &meson_clk_pll_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
.num_parents = 1,
},
};
@@ -491,7 +494,7 @@ static struct clk_regmap gxl_gp0_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "gp0_pll_dco",
.ops = &meson_clk_pll_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
.num_parents = 1,
},
};
@@ -789,7 +792,7 @@ static struct clk_regmap gxbb_mpll2 = {
static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
static const char * const clk81_parent_names[] = {
- "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
+ IN_PREFIX "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
"fclk_div3", "fclk_div5"
};
@@ -852,7 +855,7 @@ static struct clk_regmap gxbb_sar_adc_clk_sel = {
.name = "sar_adc_clk_sel",
.ops = &clk_regmap_mux_ops,
/* NOTE: The datasheet doesn't list the parents for bit 10 */
- .parent_names = (const char *[]){ "xtal", "clk81", },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal", "clk81", },
.num_parents = 2,
},
};
@@ -891,7 +894,7 @@ static struct clk_regmap gxbb_sar_adc_clk = {
*/
static const char * const gxbb_mali_0_1_parent_names[] = {
- "xtal", "gp0_pll", "mpll2", "mpll1", "fclk_div7",
+ IN_PREFIX "xtal", "gp0_pll", "mpll2", "mpll1", "fclk_div7",
"fclk_div4", "fclk_div3", "fclk_div5"
};
@@ -1153,7 +1156,7 @@ static struct clk_regmap gxbb_32k_clk = {
};
static const char * const gxbb_32k_clk_parent_names[] = {
- "xtal", "cts_slow_oscin", "fclk_div3", "fclk_div5"
+ IN_PREFIX "xtal", "cts_slow_oscin", "fclk_div3", "fclk_div5"
};
static struct clk_regmap gxbb_32k_clk_sel = {
@@ -1172,7 +1175,7 @@ static struct clk_regmap gxbb_32k_clk_sel = {
};
static const char * const gxbb_sd_emmc_clk0_parent_names[] = {
- "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7",
+ IN_PREFIX "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7",
/*
* Following these parent clocks, we should also have had mpll2, mpll3
@@ -2138,7 +2141,7 @@ static struct clk_regmap gxbb_hdmi_tx = {
/* HDMI Clocks */
static const char * const gxbb_hdmi_parent_names[] = {
- "xtal", "fclk_div4", "fclk_div3", "fclk_div5"
+ IN_PREFIX "xtal", "fclk_div4", "fclk_div3", "fclk_div5"
};
static struct clk_regmap gxbb_hdmi_sel = {
@@ -2285,7 +2288,7 @@ static struct clk_regmap gxbb_vdec_hevc = {
static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
9, 10, 11, 13, 14, };
static const char * const gen_clk_parent_names[] = {
- "xtal", "vdec_1", "vdec_hevc", "mpll0", "mpll1", "mpll2",
+ IN_PREFIX "xtal", "vdec_1", "vdec_hevc", "mpll0", "mpll1", "mpll2",
"fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "gp0_pll",
};
@@ -2854,6 +2857,192 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = {
};
static struct clk_regmap *const gxbb_clk_regmaps[] = {
+ &gxbb_clk81,
+ &gxbb_ddr,
+ &gxbb_dos,
+ &gxbb_isa,
+ &gxbb_pl301,
+ &gxbb_periphs,
+ &gxbb_spicc,
+ &gxbb_i2c,
+ &gxbb_sar_adc,
+ &gxbb_smart_card,
+ &gxbb_rng0,
+ &gxbb_uart0,
+ &gxbb_sdhc,
+ &gxbb_stream,
+ &gxbb_async_fifo,
+ &gxbb_sdio,
+ &gxbb_abuf,
+ &gxbb_hiu_iface,
+ &gxbb_assist_misc,
+ &gxbb_spi,
+ &gxbb_i2s_spdif,
+ &gxbb_eth,
+ &gxbb_demux,
+ &gxbb_aiu_glue,
+ &gxbb_iec958,
+ &gxbb_i2s_out,
+ &gxbb_amclk,
+ &gxbb_aififo2,
+ &gxbb_mixer,
+ &gxbb_mixer_iface,
+ &gxbb_adc,
+ &gxbb_blkmv,
+ &gxbb_aiu,
+ &gxbb_uart1,
+ &gxbb_g2d,
+ &gxbb_usb0,
+ &gxbb_usb1,
+ &gxbb_reset,
+ &gxbb_nand,
+ &gxbb_dos_parser,
+ &gxbb_usb,
+ &gxbb_vdin1,
+ &gxbb_ahb_arb0,
+ &gxbb_efuse,
+ &gxbb_boot_rom,
+ &gxbb_ahb_data_bus,
+ &gxbb_ahb_ctrl_bus,
+ &gxbb_hdmi_intr_sync,
+ &gxbb_hdmi_pclk,
+ &gxbb_usb1_ddr_bridge,
+ &gxbb_usb0_ddr_bridge,
+ &gxbb_mmc_pclk,
+ &gxbb_dvin,
+ &gxbb_uart2,
+ &gxbb_sana,
+ &gxbb_vpu_intr,
+ &gxbb_sec_ahb_ahb3_bridge,
+ &gxbb_clk81_a53,
+ &gxbb_vclk2_venci0,
+ &gxbb_vclk2_venci1,
+ &gxbb_vclk2_vencp0,
+ &gxbb_vclk2_vencp1,
+ &gxbb_gclk_venci_int0,
+ &gxbb_gclk_vencp_int,
+ &gxbb_dac_clk,
+ &gxbb_aoclk_gate,
+ &gxbb_iec958_gate,
+ &gxbb_enc480p,
+ &gxbb_rng1,
+ &gxbb_gclk_venci_int1,
+ &gxbb_vclk2_venclmcc,
+ &gxbb_vclk2_vencl,
+ &gxbb_vclk_other,
+ &gxbb_edp,
+ &gxbb_ao_media_cpu,
+ &gxbb_ao_ahb_sram,
+ &gxbb_ao_ahb_bus,
+ &gxbb_ao_iface,
+ &gxbb_ao_i2c,
+ &gxbb_emmc_a,
+ &gxbb_emmc_b,
+ &gxbb_emmc_c,
+ &gxbb_sar_adc_clk,
+ &gxbb_mali_0,
+ &gxbb_mali_1,
+ &gxbb_cts_amclk,
+ &gxbb_cts_mclk_i958,
+ &gxbb_32k_clk,
+ &gxbb_sd_emmc_a_clk0,
+ &gxbb_sd_emmc_b_clk0,
+ &gxbb_sd_emmc_c_clk0,
+ &gxbb_vpu_0,
+ &gxbb_vpu_1,
+ &gxbb_vapb_0,
+ &gxbb_vapb_1,
+ &gxbb_vapb,
+ &gxbb_mpeg_clk_div,
+ &gxbb_sar_adc_clk_div,
+ &gxbb_mali_0_div,
+ &gxbb_mali_1_div,
+ &gxbb_cts_mclk_i958_div,
+ &gxbb_32k_clk_div,
+ &gxbb_sd_emmc_a_clk0_div,
+ &gxbb_sd_emmc_b_clk0_div,
+ &gxbb_sd_emmc_c_clk0_div,
+ &gxbb_vpu_0_div,
+ &gxbb_vpu_1_div,
+ &gxbb_vapb_0_div,
+ &gxbb_vapb_1_div,
+ &gxbb_mpeg_clk_sel,
+ &gxbb_sar_adc_clk_sel,
+ &gxbb_mali_0_sel,
+ &gxbb_mali_1_sel,
+ &gxbb_mali,
+ &gxbb_cts_amclk_sel,
+ &gxbb_cts_mclk_i958_sel,
+ &gxbb_cts_i958,
+ &gxbb_32k_clk_sel,
+ &gxbb_sd_emmc_a_clk0_sel,
+ &gxbb_sd_emmc_b_clk0_sel,
+ &gxbb_sd_emmc_c_clk0_sel,
+ &gxbb_vpu_0_sel,
+ &gxbb_vpu_1_sel,
+ &gxbb_vpu,
+ &gxbb_vapb_0_sel,
+ &gxbb_vapb_1_sel,
+ &gxbb_vapb_sel,
+ &gxbb_mpll0,
+ &gxbb_mpll1,
+ &gxbb_mpll2,
+ &gxbb_mpll0_div,
+ &gxbb_mpll1_div,
+ &gxbb_mpll2_div,
+ &gxbb_cts_amclk_div,
+ &gxbb_fixed_pll,
+ &gxbb_sys_pll,
+ &gxbb_mpll_prediv,
+ &gxbb_fclk_div2,
+ &gxbb_fclk_div3,
+ &gxbb_fclk_div4,
+ &gxbb_fclk_div5,
+ &gxbb_fclk_div7,
+ &gxbb_vdec_1_sel,
+ &gxbb_vdec_1_div,
+ &gxbb_vdec_1,
+ &gxbb_vdec_hevc_sel,
+ &gxbb_vdec_hevc_div,
+ &gxbb_vdec_hevc,
+ &gxbb_gen_clk_sel,
+ &gxbb_gen_clk_div,
+ &gxbb_gen_clk,
+ &gxbb_fixed_pll_dco,
+ &gxbb_sys_pll_dco,
+ &gxbb_gp0_pll,
+ &gxbb_vid_pll,
+ &gxbb_vid_pll_sel,
+ &gxbb_vid_pll_div,
+ &gxbb_vclk,
+ &gxbb_vclk_sel,
+ &gxbb_vclk_div,
+ &gxbb_vclk_input,
+ &gxbb_vclk_div1,
+ &gxbb_vclk_div2_en,
+ &gxbb_vclk_div4_en,
+ &gxbb_vclk_div6_en,
+ &gxbb_vclk_div12_en,
+ &gxbb_vclk2,
+ &gxbb_vclk2_sel,
+ &gxbb_vclk2_div,
+ &gxbb_vclk2_input,
+ &gxbb_vclk2_div1,
+ &gxbb_vclk2_div2_en,
+ &gxbb_vclk2_div4_en,
+ &gxbb_vclk2_div6_en,
+ &gxbb_vclk2_div12_en,
+ &gxbb_cts_enci,
+ &gxbb_cts_enci_sel,
+ &gxbb_cts_encp,
+ &gxbb_cts_encp_sel,
+ &gxbb_cts_vdac,
+ &gxbb_cts_vdac_sel,
+ &gxbb_hdmi_tx,
+ &gxbb_hdmi_tx_sel,
+ &gxbb_hdmi_sel,
+ &gxbb_hdmi_div,
+ &gxbb_hdmi,
&gxbb_gp0_pll_dco,
&gxbb_hdmi_pll,
&gxbb_hdmi_pll_od,
@@ -2862,14 +3051,6 @@ static struct clk_regmap *const gxbb_clk_regmaps[] = {
};
static struct clk_regmap *const gxl_clk_regmaps[] = {
- &gxl_gp0_pll_dco,
- &gxl_hdmi_pll,
- &gxl_hdmi_pll_od,
- &gxl_hdmi_pll_od2,
- &gxl_hdmi_pll_dco,
-};
-
-static struct clk_regmap *const gx_clk_regmaps[] = {
&gxbb_clk81,
&gxbb_ddr,
&gxbb_dos,
@@ -3056,23 +3237,22 @@ static struct clk_regmap *const gx_clk_regmaps[] = {
&gxbb_hdmi_sel,
&gxbb_hdmi_div,
&gxbb_hdmi,
+ &gxl_gp0_pll_dco,
+ &gxl_hdmi_pll,
+ &gxl_hdmi_pll_od,
+ &gxl_hdmi_pll_od2,
+ &gxl_hdmi_pll_dco,
};
-struct clkc_data {
- struct clk_regmap *const *regmap_clks;
- unsigned int regmap_clks_count;
- struct clk_hw_onecell_data *hw_onecell_data;
-};
-
-static const struct clkc_data gxbb_clkc_data = {
+static const struct meson_eeclkc_data gxbb_clkc_data = {
.regmap_clks = gxbb_clk_regmaps,
- .regmap_clks_count = ARRAY_SIZE(gxbb_clk_regmaps),
+ .regmap_clk_num = ARRAY_SIZE(gxbb_clk_regmaps),
.hw_onecell_data = &gxbb_hw_onecell_data,
};
-static const struct clkc_data gxl_clkc_data = {
+static const struct meson_eeclkc_data gxl_clkc_data = {
.regmap_clks = gxl_clk_regmaps,
- .regmap_clks_count = ARRAY_SIZE(gxl_clk_regmaps),
+ .regmap_clk_num = ARRAY_SIZE(gxl_clk_regmaps),
.hw_onecell_data = &gxl_hw_onecell_data,
};
@@ -3082,52 +3262,8 @@ static const struct of_device_id clkc_match_table[] = {
{},
};
-static int gxbb_clkc_probe(struct platform_device *pdev)
-{
- const struct clkc_data *clkc_data;
- struct regmap *map;
- int ret, i;
- struct device *dev = &pdev->dev;
-
- clkc_data = of_device_get_match_data(dev);
- if (!clkc_data)
- return -EINVAL;
-
- /* Get the hhi system controller node if available */
- map = syscon_node_to_regmap(of_get_parent(dev->of_node));
- if (IS_ERR(map)) {
- dev_err(dev, "failed to get HHI regmap\n");
- return PTR_ERR(map);
- }
-
- /* Populate regmap for the common regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(gx_clk_regmaps); i++)
- gx_clk_regmaps[i]->map = map;
-
- /* Populate regmap for soc specific clocks */
- for (i = 0; i < clkc_data->regmap_clks_count; i++)
- clkc_data->regmap_clks[i]->map = map;
-
- /* Register all clks */
- for (i = 0; i < clkc_data->hw_onecell_data->num; i++) {
- /* array might be sparse */
- if (!clkc_data->hw_onecell_data->hws[i])
- continue;
-
- ret = devm_clk_hw_register(dev,
- clkc_data->hw_onecell_data->hws[i]);
- if (ret) {
- dev_err(dev, "Clock registration failed\n");
- return ret;
- }
- }
-
- return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
- clkc_data->hw_onecell_data);
-}
-
static struct platform_driver gxbb_driver = {
- .probe = gxbb_clkc_probe,
+ .probe = meson_eeclkc_probe,
.driver = {
.name = "gxbb-clkc",
.of_match_table = clkc_match_table,
diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
index f965845917e3..b67951909e04 100644
--- a/drivers/clk/meson/meson-aoclk.c
+++ b/drivers/clk/meson/meson-aoclk.c
@@ -14,9 +14,11 @@
#include <linux/reset-controller.h>
#include <linux/mfd/syscon.h>
#include <linux/of_device.h>
-#include "clk-regmap.h"
+#include <linux/slab.h>
#include "meson-aoclk.h"
+#include "clk-input.h"
+
static int meson_aoclk_do_reset(struct reset_controller_dev *rcdev,
unsigned long id)
{
@@ -31,6 +33,37 @@ static const struct reset_control_ops meson_aoclk_reset_ops = {
.reset = meson_aoclk_do_reset,
};
+static int meson_aoclkc_register_inputs(struct device *dev,
+ struct meson_aoclk_data *data)
+{
+ struct clk_hw *hw;
+ char *str;
+ int i;
+
+ for (i = 0; i < data->num_inputs; i++) {
+ const struct meson_aoclk_input *in = &data->inputs[i];
+
+ str = kasprintf(GFP_KERNEL, "%s%s", data->input_prefix,
+ in->name);
+ if (!str)
+ return -ENOMEM;
+
+ hw = meson_clk_hw_register_input(dev, in->name, str, 0);
+ kfree(str);
+
+ if (IS_ERR(hw)) {
+ if (!in->required && PTR_ERR(hw) == -ENOENT)
+ continue;
+ else if (PTR_ERR(hw) != -EPROBE_DEFER)
+ dev_err(dev, "failed to register input %s\n",
+ in->name);
+ return PTR_ERR(hw);
+ }
+ }
+
+ return 0;
+}
+
int meson_aoclkc_probe(struct platform_device *pdev)
{
struct meson_aoclk_reset_controller *rstc;
@@ -53,6 +86,10 @@ int meson_aoclkc_probe(struct platform_device *pdev)
return PTR_ERR(regmap);
}
+ ret = meson_aoclkc_register_inputs(dev, data);
+ if (ret)
+ return ret;
+
/* Reset Controller */
rstc->data = data;
rstc->regmap = regmap;
@@ -65,15 +102,20 @@ int meson_aoclkc_probe(struct platform_device *pdev)
return ret;
}
- /*
- * Populate regmap and register all clks
- */
- for (clkid = 0; clkid < data->num_clks; clkid++) {
+ /* Populate regmap */
+ for (clkid = 0; clkid < data->num_clks; clkid++)
data->clks[clkid]->map = regmap;
+ /* Register all clks */
+ for (clkid = 0; clkid < data->hw_data->num; clkid++) {
+ if (!data->hw_data->hws[clkid])
+ continue;
+
ret = devm_clk_hw_register(dev, data->hw_data->hws[clkid]);
- if (ret)
+ if (ret) {
+ dev_err(dev, "Clock registration failed\n");
return ret;
+ }
}
return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
diff --git a/drivers/clk/meson/meson-aoclk.h b/drivers/clk/meson/meson-aoclk.h
index ab2819e88922..999cde3868f7 100644
--- a/drivers/clk/meson/meson-aoclk.h
+++ b/drivers/clk/meson/meson-aoclk.h
@@ -11,16 +11,27 @@
#ifndef __MESON_AOCLK_H__
#define __MESON_AOCLK_H__
+#include <linux/clk-provider.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/reset-controller.h>
+
#include "clk-regmap.h"
+struct meson_aoclk_input {
+ const char *name;
+ bool required;
+};
+
struct meson_aoclk_data {
const unsigned int reset_reg;
const int num_reset;
const unsigned int *reset;
- int num_clks;
+ const int num_clks;
struct clk_regmap **clks;
+ const int num_inputs;
+ const struct meson_aoclk_input *inputs;
+ const char *input_prefix;
const struct clk_hw_onecell_data *hw_data;
};
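
Reviewer note: a hedged sketch of how a SoC-specific AO clock controller could describe its external inputs with the fields added above. Only the structure and field names come from the header; every other identifier and value below is invented for illustration.

static const struct meson_aoclk_input example_aoclk_inputs[] = {
	{ .name = "xtal", .required = true },
	{ .name = "mclk", .required = false },
};

static const struct meson_aoclk_data example_aoclk_data = {
	/* existing reset/clks/hw_data fields stay as before ... */
	.inputs       = example_aoclk_inputs,
	.num_inputs   = ARRAY_SIZE(example_aoclk_inputs),
	.input_prefix = "ao-in-",
};

With this, meson_aoclkc_register_inputs() registers one input clock per entry, skipping optional ones that are absent (-ENOENT) and failing the probe only for required or deferred inputs.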
diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c
new file mode 100644
index 000000000000..37a34c9c3885
--- /dev/null
+++ b/drivers/clk/meson/meson-eeclk.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include "clk-input.h"
+#include "clk-regmap.h"
+#include "meson-eeclk.h"
+
+int meson_eeclkc_probe(struct platform_device *pdev)
+{
+ const struct meson_eeclkc_data *data;
+ struct device *dev = &pdev->dev;
+ struct clk_hw *input;
+ struct regmap *map;
+ int ret, i;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ /* Get the hhi system controller node */
+ map = syscon_node_to_regmap(of_get_parent(dev->of_node));
+ if (IS_ERR(map)) {
+ dev_err(dev,
+ "failed to get HHI regmap\n");
+ return PTR_ERR(map);
+ }
+
+ input = meson_clk_hw_register_input(dev, "xtal", IN_PREFIX "xtal", 0);
+ if (IS_ERR(input)) {
+ ret = PTR_ERR(input);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get input clock");
+ return ret;
+ }
+
+ /* Populate regmap for the regmap backed clocks */
+ for (i = 0; i < data->regmap_clk_num; i++)
+ data->regmap_clks[i]->map = map;
+
+ for (i = 0; i < data->hw_onecell_data->num; i++) {
+ /* array might be sparse */
+ if (!data->hw_onecell_data->hws[i])
+ continue;
+
+ ret = devm_clk_hw_register(dev, data->hw_onecell_data->hws[i]);
+ if (ret) {
+ dev_err(dev, "Clock registration failed\n");
+ return ret;
+ }
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ data->hw_onecell_data);
+}
diff --git a/drivers/clk/meson/meson-eeclk.h b/drivers/clk/meson/meson-eeclk.h
new file mode 100644
index 000000000000..1b809b1419fe
--- /dev/null
+++ b/drivers/clk/meson/meson-eeclk.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef __MESON_CLKC_H
+#define __MESON_CLKC_H
+
+#include <linux/clk-provider.h>
+#include "clk-regmap.h"
+
+#define IN_PREFIX "ee-in-"
+
+struct platform_device;
+
+struct meson_eeclkc_data {
+ struct clk_regmap *const *regmap_clks;
+ unsigned int regmap_clk_num;
+ struct clk_hw_onecell_data *hw_onecell_data;
+};
+
+int meson_eeclkc_probe(struct platform_device *pdev);
+
+#endif /* __MESON_CLKC_H */
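
Reviewer note: mirroring the gxbb conversion above, a minimal sketch of what any EE clock controller now has to provide once it switches to the shared probe. meson_eeclkc_data and meson_eeclkc_probe are from this patch; the remaining identifiers are placeholders.

static const struct meson_eeclkc_data example_eeclkc_data = {
	.regmap_clks     = example_clk_regmaps,
	.regmap_clk_num  = ARRAY_SIZE(example_clk_regmaps),
	.hw_onecell_data = &example_hw_onecell_data,
};

static struct platform_driver example_clkc_driver = {
	.probe = meson_eeclkc_probe,
	.driver = {
		.name = "example-clkc",
		.of_match_table = example_clkc_match_table,
	},
};
builtin_platform_driver(example_clkc_driver);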
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 950d0e548c75..576ad42252d0 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -16,9 +16,10 @@
#include <linux/slab.h>
#include <linux/regmap.h>
-#include "clkc.h"
#include "meson8b.h"
#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-mpll.h"
static DEFINE_SPINLOCK(meson_clk_lock);
@@ -803,16 +804,16 @@ static struct clk_fixed_factor meson8b_cpu_clk_div8 = {
},
};
-static u32 mux_table_abp[] = { 1, 2, 3, 4, 5, 6, 7 };
-static struct clk_regmap meson8b_abp_clk_sel = {
+static u32 mux_table_apb[] = { 1, 2, 3, 4, 5, 6, 7 };
+static struct clk_regmap meson8b_apb_clk_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.mask = 0x7,
.shift = 3,
- .table = mux_table_abp,
+ .table = mux_table_apb,
},
.hw.init = &(struct clk_init_data){
- .name = "abp_clk_sel",
+ .name = "apb_clk_sel",
.ops = &clk_regmap_mux_ops,
.parent_names = (const char *[]){ "cpu_clk_div2",
"cpu_clk_div3",
@@ -825,16 +826,16 @@ static struct clk_regmap meson8b_abp_clk_sel = {
},
};
-static struct clk_regmap meson8b_abp_clk_gate = {
+static struct clk_regmap meson8b_apb_clk_gate = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.bit_idx = 16,
.flags = CLK_GATE_SET_TO_DISABLE,
},
.hw.init = &(struct clk_init_data){
- .name = "abp_clk_dis",
+ .name = "apb_clk_dis",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "abp_clk_sel" },
+ .parent_names = (const char *[]){ "apb_clk_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1573,6 +1574,135 @@ static struct clk_regmap meson8b_hdmi_sys = {
},
};
+/*
+ * The MALI IP is clocked by two identical clocks (mali_0 and mali_1)
+ * muxed by a glitch-free switch on Meson8b and Meson8m2. Meson8 only
+ * has mali_0 and no glitch-free mux.
+ */
+static const char * const meson8b_mali_0_1_parent_names[] = {
+ "xtal", "mpll2", "mpll1", "fclk_div7", "fclk_div4", "fclk_div3",
+ "fclk_div5"
+};
+
+static u32 meson8b_mali_0_1_mux_table[] = { 0, 2, 3, 4, 5, 6, 7 };
+
+static struct clk_regmap meson8b_mali_0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 9,
+ .table = meson8b_mali_0_1_mux_table,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali_0_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = meson8b_mali_0_1_parent_names,
+ .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_names),
+ /*
+ * Don't propagate rate changes up because the only changeable
+ * parents are mpll1 and mpll2 but we need those for audio and
+ * RGMII (Ethernet). We don't want to change the audio or
+ * Ethernet clocks when setting the GPU frequency.
+ */
+ .flags = 0,
+ },
+};
+
+static struct clk_regmap meson8b_mali_0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali_0_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "mali_0_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_mali_0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali_0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mali_0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_mali_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 25,
+ .table = meson8b_mali_0_1_mux_table,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali_1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = meson8b_mali_0_1_parent_names,
+ .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_names),
+ /*
+ * Don't propagate rate changes up because the only changeable
+ * parents are mpll1 and mpll2 but we need those for audio and
+ * RGMII (Ethernet). We don't want to change the audio or
+ * Ethernet clocks when setting the GPU frequency.
+ */
+ .flags = 0,
+ },
+};
+
+static struct clk_regmap meson8b_mali_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali_1_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "mali_1_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_mali_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali_1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mali_1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_mali = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .mask = 1,
+ .shift = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "mali_0", "mali_1" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
/* Everything Else (EE) domain gates */
static MESON_GATE(meson8b_ddr, HHI_GCLK_MPEG0, 0);
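
Reviewer note: the comment above the mali_0/mali_1 definitions explains the glitch-free arrangement; here is a hedged consumer-side sketch of how a GPU driver could use it. The clock names match the definitions above, but the lookup con_ids and the function itself are illustrative only, not part of this patch.

static int example_mali_set_rate(struct device *dev, unsigned long rate)
{
	struct clk *mux  = devm_clk_get(dev, "mali");   /* glitch-free mux */
	struct clk *idle = devm_clk_get(dev, "mali_1"); /* currently parked leg */
	int ret;

	if (IS_ERR(mux) || IS_ERR(idle))
		return -ENODEV;

	ret = clk_set_rate(idle, rate);   /* retune the idle leg first */
	if (ret)
		return ret;

	return clk_set_parent(mux, idle); /* then switch to it without a glitch */
}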
@@ -1659,6 +1789,188 @@ static MESON_GATE(meson8b_ao_ahb_sram, HHI_GCLK_AO, 1);
static MESON_GATE(meson8b_ao_ahb_bus, HHI_GCLK_AO, 2);
static MESON_GATE(meson8b_ao_iface, HHI_GCLK_AO, 3);
+static struct clk_hw_onecell_data meson8_hw_onecell_data = {
+ .hws = {
+ [CLKID_XTAL] = &meson8b_xtal.hw,
+ [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
+ [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
+ [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
+ [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
+ [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
+ [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
+ [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
+ [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
+ [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
+ [CLKID_MPEG_SEL] = &meson8b_mpeg_clk_sel.hw,
+ [CLKID_MPEG_DIV] = &meson8b_mpeg_clk_div.hw,
+ [CLKID_CLK81] = &meson8b_clk81.hw,
+ [CLKID_DDR] = &meson8b_ddr.hw,
+ [CLKID_DOS] = &meson8b_dos.hw,
+ [CLKID_ISA] = &meson8b_isa.hw,
+ [CLKID_PL301] = &meson8b_pl301.hw,
+ [CLKID_PERIPHS] = &meson8b_periphs.hw,
+ [CLKID_SPICC] = &meson8b_spicc.hw,
+ [CLKID_I2C] = &meson8b_i2c.hw,
+ [CLKID_SAR_ADC] = &meson8b_sar_adc.hw,
+ [CLKID_SMART_CARD] = &meson8b_smart_card.hw,
+ [CLKID_RNG0] = &meson8b_rng0.hw,
+ [CLKID_UART0] = &meson8b_uart0.hw,
+ [CLKID_SDHC] = &meson8b_sdhc.hw,
+ [CLKID_STREAM] = &meson8b_stream.hw,
+ [CLKID_ASYNC_FIFO] = &meson8b_async_fifo.hw,
+ [CLKID_SDIO] = &meson8b_sdio.hw,
+ [CLKID_ABUF] = &meson8b_abuf.hw,
+ [CLKID_HIU_IFACE] = &meson8b_hiu_iface.hw,
+ [CLKID_ASSIST_MISC] = &meson8b_assist_misc.hw,
+ [CLKID_SPI] = &meson8b_spi.hw,
+ [CLKID_I2S_SPDIF] = &meson8b_i2s_spdif.hw,
+ [CLKID_ETH] = &meson8b_eth.hw,
+ [CLKID_DEMUX] = &meson8b_demux.hw,
+ [CLKID_AIU_GLUE] = &meson8b_aiu_glue.hw,
+ [CLKID_IEC958] = &meson8b_iec958.hw,
+ [CLKID_I2S_OUT] = &meson8b_i2s_out.hw,
+ [CLKID_AMCLK] = &meson8b_amclk.hw,
+ [CLKID_AIFIFO2] = &meson8b_aififo2.hw,
+ [CLKID_MIXER] = &meson8b_mixer.hw,
+ [CLKID_MIXER_IFACE] = &meson8b_mixer_iface.hw,
+ [CLKID_ADC] = &meson8b_adc.hw,
+ [CLKID_BLKMV] = &meson8b_blkmv.hw,
+ [CLKID_AIU] = &meson8b_aiu.hw,
+ [CLKID_UART1] = &meson8b_uart1.hw,
+ [CLKID_G2D] = &meson8b_g2d.hw,
+ [CLKID_USB0] = &meson8b_usb0.hw,
+ [CLKID_USB1] = &meson8b_usb1.hw,
+ [CLKID_RESET] = &meson8b_reset.hw,
+ [CLKID_NAND] = &meson8b_nand.hw,
+ [CLKID_DOS_PARSER] = &meson8b_dos_parser.hw,
+ [CLKID_USB] = &meson8b_usb.hw,
+ [CLKID_VDIN1] = &meson8b_vdin1.hw,
+ [CLKID_AHB_ARB0] = &meson8b_ahb_arb0.hw,
+ [CLKID_EFUSE] = &meson8b_efuse.hw,
+ [CLKID_BOOT_ROM] = &meson8b_boot_rom.hw,
+ [CLKID_AHB_DATA_BUS] = &meson8b_ahb_data_bus.hw,
+ [CLKID_AHB_CTRL_BUS] = &meson8b_ahb_ctrl_bus.hw,
+ [CLKID_HDMI_INTR_SYNC] = &meson8b_hdmi_intr_sync.hw,
+ [CLKID_HDMI_PCLK] = &meson8b_hdmi_pclk.hw,
+ [CLKID_USB1_DDR_BRIDGE] = &meson8b_usb1_ddr_bridge.hw,
+ [CLKID_USB0_DDR_BRIDGE] = &meson8b_usb0_ddr_bridge.hw,
+ [CLKID_MMC_PCLK] = &meson8b_mmc_pclk.hw,
+ [CLKID_DVIN] = &meson8b_dvin.hw,
+ [CLKID_UART2] = &meson8b_uart2.hw,
+ [CLKID_SANA] = &meson8b_sana.hw,
+ [CLKID_VPU_INTR] = &meson8b_vpu_intr.hw,
+ [CLKID_SEC_AHB_AHB3_BRIDGE] = &meson8b_sec_ahb_ahb3_bridge.hw,
+ [CLKID_CLK81_A9] = &meson8b_clk81_a9.hw,
+ [CLKID_VCLK2_VENCI0] = &meson8b_vclk2_venci0.hw,
+ [CLKID_VCLK2_VENCI1] = &meson8b_vclk2_venci1.hw,
+ [CLKID_VCLK2_VENCP0] = &meson8b_vclk2_vencp0.hw,
+ [CLKID_VCLK2_VENCP1] = &meson8b_vclk2_vencp1.hw,
+ [CLKID_GCLK_VENCI_INT] = &meson8b_gclk_venci_int.hw,
+ [CLKID_GCLK_VENCP_INT] = &meson8b_gclk_vencp_int.hw,
+ [CLKID_DAC_CLK] = &meson8b_dac_clk.hw,
+ [CLKID_AOCLK_GATE] = &meson8b_aoclk_gate.hw,
+ [CLKID_IEC958_GATE] = &meson8b_iec958_gate.hw,
+ [CLKID_ENC480P] = &meson8b_enc480p.hw,
+ [CLKID_RNG1] = &meson8b_rng1.hw,
+ [CLKID_GCLK_VENCL_INT] = &meson8b_gclk_vencl_int.hw,
+ [CLKID_VCLK2_VENCLMCC] = &meson8b_vclk2_venclmcc.hw,
+ [CLKID_VCLK2_VENCL] = &meson8b_vclk2_vencl.hw,
+ [CLKID_VCLK2_OTHER] = &meson8b_vclk2_other.hw,
+ [CLKID_EDP] = &meson8b_edp.hw,
+ [CLKID_AO_MEDIA_CPU] = &meson8b_ao_media_cpu.hw,
+ [CLKID_AO_AHB_SRAM] = &meson8b_ao_ahb_sram.hw,
+ [CLKID_AO_AHB_BUS] = &meson8b_ao_ahb_bus.hw,
+ [CLKID_AO_IFACE] = &meson8b_ao_iface.hw,
+ [CLKID_MPLL0] = &meson8b_mpll0.hw,
+ [CLKID_MPLL1] = &meson8b_mpll1.hw,
+ [CLKID_MPLL2] = &meson8b_mpll2.hw,
+ [CLKID_MPLL0_DIV] = &meson8b_mpll0_div.hw,
+ [CLKID_MPLL1_DIV] = &meson8b_mpll1_div.hw,
+ [CLKID_MPLL2_DIV] = &meson8b_mpll2_div.hw,
+ [CLKID_CPU_IN_SEL] = &meson8b_cpu_in_sel.hw,
+ [CLKID_CPU_IN_DIV2] = &meson8b_cpu_in_div2.hw,
+ [CLKID_CPU_IN_DIV3] = &meson8b_cpu_in_div3.hw,
+ [CLKID_CPU_SCALE_DIV] = &meson8b_cpu_scale_div.hw,
+ [CLKID_CPU_SCALE_OUT_SEL] = &meson8b_cpu_scale_out_sel.hw,
+ [CLKID_MPLL_PREDIV] = &meson8b_mpll_prediv.hw,
+ [CLKID_FCLK_DIV2_DIV] = &meson8b_fclk_div2_div.hw,
+ [CLKID_FCLK_DIV3_DIV] = &meson8b_fclk_div3_div.hw,
+ [CLKID_FCLK_DIV4_DIV] = &meson8b_fclk_div4_div.hw,
+ [CLKID_FCLK_DIV5_DIV] = &meson8b_fclk_div5_div.hw,
+ [CLKID_FCLK_DIV7_DIV] = &meson8b_fclk_div7_div.hw,
+ [CLKID_NAND_SEL] = &meson8b_nand_clk_sel.hw,
+ [CLKID_NAND_DIV] = &meson8b_nand_clk_div.hw,
+ [CLKID_NAND_CLK] = &meson8b_nand_clk_gate.hw,
+ [CLKID_PLL_FIXED_DCO] = &meson8b_fixed_pll_dco.hw,
+ [CLKID_HDMI_PLL_DCO] = &meson8b_hdmi_pll_dco.hw,
+ [CLKID_PLL_SYS_DCO] = &meson8b_sys_pll_dco.hw,
+ [CLKID_CPU_CLK_DIV2] = &meson8b_cpu_clk_div2.hw,
+ [CLKID_CPU_CLK_DIV3] = &meson8b_cpu_clk_div3.hw,
+ [CLKID_CPU_CLK_DIV4] = &meson8b_cpu_clk_div4.hw,
+ [CLKID_CPU_CLK_DIV5] = &meson8b_cpu_clk_div5.hw,
+ [CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw,
+ [CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw,
+ [CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw,
+ [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw,
+ [CLKID_APB] = &meson8b_apb_clk_gate.hw,
+ [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw,
+ [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw,
+ [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw,
+ [CLKID_AXI] = &meson8b_axi_clk_gate.hw,
+ [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_clk_sel.hw,
+ [CLKID_L2_DRAM] = &meson8b_l2_dram_clk_gate.hw,
+ [CLKID_HDMI_PLL_LVDS_OUT] = &meson8b_hdmi_pll_lvds_out.hw,
+ [CLKID_HDMI_PLL_HDMI_OUT] = &meson8b_hdmi_pll_hdmi_out.hw,
+ [CLKID_VID_PLL_IN_SEL] = &meson8b_vid_pll_in_sel.hw,
+ [CLKID_VID_PLL_IN_EN] = &meson8b_vid_pll_in_en.hw,
+ [CLKID_VID_PLL_PRE_DIV] = &meson8b_vid_pll_pre_div.hw,
+ [CLKID_VID_PLL_POST_DIV] = &meson8b_vid_pll_post_div.hw,
+ [CLKID_VID_PLL_FINAL_DIV] = &meson8b_vid_pll_final_div.hw,
+ [CLKID_VCLK_IN_SEL] = &meson8b_vclk_in_sel.hw,
+ [CLKID_VCLK_IN_EN] = &meson8b_vclk_in_en.hw,
+ [CLKID_VCLK_DIV1] = &meson8b_vclk_div1_gate.hw,
+ [CLKID_VCLK_DIV2_DIV] = &meson8b_vclk_div2_div.hw,
+ [CLKID_VCLK_DIV2] = &meson8b_vclk_div2_div_gate.hw,
+ [CLKID_VCLK_DIV4_DIV] = &meson8b_vclk_div4_div.hw,
+ [CLKID_VCLK_DIV4] = &meson8b_vclk_div4_div_gate.hw,
+ [CLKID_VCLK_DIV6_DIV] = &meson8b_vclk_div6_div.hw,
+ [CLKID_VCLK_DIV6] = &meson8b_vclk_div6_div_gate.hw,
+ [CLKID_VCLK_DIV12_DIV] = &meson8b_vclk_div12_div.hw,
+ [CLKID_VCLK_DIV12] = &meson8b_vclk_div12_div_gate.hw,
+ [CLKID_VCLK2_IN_SEL] = &meson8b_vclk2_in_sel.hw,
+ [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_clk_in_en.hw,
+ [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1_gate.hw,
+ [CLKID_VCLK2_DIV2_DIV] = &meson8b_vclk2_div2_div.hw,
+ [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2_div_gate.hw,
+ [CLKID_VCLK2_DIV4_DIV] = &meson8b_vclk2_div4_div.hw,
+ [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4_div_gate.hw,
+ [CLKID_VCLK2_DIV6_DIV] = &meson8b_vclk2_div6_div.hw,
+ [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6_div_gate.hw,
+ [CLKID_VCLK2_DIV12_DIV] = &meson8b_vclk2_div12_div.hw,
+ [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12_div_gate.hw,
+ [CLKID_CTS_ENCT_SEL] = &meson8b_cts_enct_sel.hw,
+ [CLKID_CTS_ENCT] = &meson8b_cts_enct.hw,
+ [CLKID_CTS_ENCP_SEL] = &meson8b_cts_encp_sel.hw,
+ [CLKID_CTS_ENCP] = &meson8b_cts_encp.hw,
+ [CLKID_CTS_ENCI_SEL] = &meson8b_cts_enci_sel.hw,
+ [CLKID_CTS_ENCI] = &meson8b_cts_enci.hw,
+ [CLKID_HDMI_TX_PIXEL_SEL] = &meson8b_hdmi_tx_pixel_sel.hw,
+ [CLKID_HDMI_TX_PIXEL] = &meson8b_hdmi_tx_pixel.hw,
+ [CLKID_CTS_ENCL_SEL] = &meson8b_cts_encl_sel.hw,
+ [CLKID_CTS_ENCL] = &meson8b_cts_encl.hw,
+ [CLKID_CTS_VDAC0_SEL] = &meson8b_cts_vdac0_sel.hw,
+ [CLKID_CTS_VDAC0] = &meson8b_cts_vdac0.hw,
+ [CLKID_HDMI_SYS_SEL] = &meson8b_hdmi_sys_sel.hw,
+ [CLKID_HDMI_SYS_DIV] = &meson8b_hdmi_sys_div.hw,
+ [CLKID_HDMI_SYS] = &meson8b_hdmi_sys.hw,
+ [CLKID_MALI_0_SEL] = &meson8b_mali_0_sel.hw,
+ [CLKID_MALI_0_DIV] = &meson8b_mali_0_div.hw,
+ [CLKID_MALI] = &meson8b_mali_0.hw,
+ [CLK_NR_CLKS] = NULL,
+ },
+ .num = CLK_NR_CLKS,
+};
+
static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
.hws = {
[CLKID_XTAL] = &meson8b_xtal.hw,
@@ -1781,8 +2093,8 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
[CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw,
[CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw,
[CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw,
- [CLKID_ABP_SEL] = &meson8b_abp_clk_sel.hw,
- [CLKID_ABP] = &meson8b_abp_clk_gate.hw,
+ [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw,
+ [CLKID_APB] = &meson8b_apb_clk_gate.hw,
[CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw,
[CLKID_PERIPH] = &meson8b_periph_clk_gate.hw,
[CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw,
@@ -1833,6 +2145,13 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
[CLKID_HDMI_SYS_SEL] = &meson8b_hdmi_sys_sel.hw,
[CLKID_HDMI_SYS_DIV] = &meson8b_hdmi_sys_div.hw,
[CLKID_HDMI_SYS] = &meson8b_hdmi_sys.hw,
+ [CLKID_MALI_0_SEL] = &meson8b_mali_0_sel.hw,
+ [CLKID_MALI_0_DIV] = &meson8b_mali_0_div.hw,
+ [CLKID_MALI_0] = &meson8b_mali_0.hw,
+ [CLKID_MALI_1_SEL] = &meson8b_mali_1_sel.hw,
+ [CLKID_MALI_1_DIV] = &meson8b_mali_1_div.hw,
+ [CLKID_MALI_1] = &meson8b_mali_1.hw,
+ [CLKID_MALI] = &meson8b_mali.hw,
[CLK_NR_CLKS] = NULL,
},
.num = CLK_NR_CLKS,
@@ -1943,8 +2262,8 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = {
&meson8b_fixed_pll_dco,
&meson8b_hdmi_pll_dco,
&meson8b_sys_pll_dco,
- &meson8b_abp_clk_sel,
- &meson8b_abp_clk_gate,
+ &meson8b_apb_clk_sel,
+ &meson8b_apb_clk_gate,
&meson8b_periph_clk_sel,
&meson8b_periph_clk_gate,
&meson8b_axi_clk_sel,
@@ -1988,6 +2307,13 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = {
&meson8b_hdmi_sys_sel,
&meson8b_hdmi_sys_div,
&meson8b_hdmi_sys,
+ &meson8b_mali_0_sel,
+ &meson8b_mali_0_div,
+ &meson8b_mali_0,
+ &meson8b_mali_1_sel,
+ &meson8b_mali_1_div,
+ &meson8b_mali_1,
+ &meson8b_mali,
};
static const struct meson8b_clk_reset_line {
@@ -2132,7 +2458,6 @@ static int meson8b_cpu_clk_notifier_cb(struct notifier_block *nb,
static struct meson8b_nb_data meson8b_cpu_nb_data = {
.nb.notifier_call = meson8b_cpu_clk_notifier_cb,
- .onecell_data = &meson8b_hw_onecell_data,
};
static const struct regmap_config clkc_regmap_config = {
@@ -2141,7 +2466,8 @@ static const struct regmap_config clkc_regmap_config = {
.reg_stride = 4,
};
-static void __init meson8b_clkc_init(struct device_node *np)
+static void __init meson8b_clkc_init_common(struct device_node *np,
+ struct clk_hw_onecell_data *clk_hw_onecell_data)
{
struct meson8b_clk_reset *rstc;
const char *notifier_clk_name;
@@ -2192,14 +2518,16 @@ static void __init meson8b_clkc_init(struct device_node *np)
*/
for (i = CLKID_XTAL; i < CLK_NR_CLKS; i++) {
/* array might be sparse */
- if (!meson8b_hw_onecell_data.hws[i])
+ if (!clk_hw_onecell_data->hws[i])
continue;
- ret = clk_hw_register(NULL, meson8b_hw_onecell_data.hws[i]);
+ ret = clk_hw_register(NULL, clk_hw_onecell_data->hws[i]);
if (ret)
return;
}
+ meson8b_cpu_nb_data.onecell_data = clk_hw_onecell_data;
+
/*
* FIXME we shouldn't program the muxes in notifier handlers. The
* tricky programming sequence will be handled by the forthcoming
@@ -2215,13 +2543,23 @@ static void __init meson8b_clkc_init(struct device_node *np)
}
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
- &meson8b_hw_onecell_data);
+ clk_hw_onecell_data);
if (ret)
pr_err("%s: failed to register clock provider\n", __func__);
}
+static void __init meson8_clkc_init(struct device_node *np)
+{
+ return meson8b_clkc_init_common(np, &meson8_hw_onecell_data);
+}
+
+static void __init meson8b_clkc_init(struct device_node *np)
+{
+ return meson8b_clkc_init_common(np, &meson8b_hw_onecell_data);
+}
+
CLK_OF_DECLARE_DRIVER(meson8_clkc, "amlogic,meson8-clkc",
- meson8b_clkc_init);
+ meson8_clkc_init);
CLK_OF_DECLARE_DRIVER(meson8b_clkc, "amlogic,meson8b-clkc",
meson8b_clkc_init);
CLK_OF_DECLARE_DRIVER(meson8m2_clkc, "amlogic,meson8m2-clkc",
diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h
index 87fba739af81..b8c58faeae52 100644
--- a/drivers/clk/meson/meson8b.h
+++ b/drivers/clk/meson/meson8b.h
@@ -33,6 +33,7 @@
#define HHI_VID_CLK_CNTL2 0x194 /* 0x65 offset in data sheet */
#define HHI_VID_DIVIDER_CNTL 0x198 /* 0x66 offset in data sheet */
#define HHI_SYS_CPU_CLK_CNTL0 0x19c /* 0x67 offset in data sheet */
+#define HHI_MALI_CLK_CNTL 0x1b0 /* 0x6c offset in data sheet */
#define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 offset in data sheet */
#define HHI_NAND_CLK_CNTL 0x25c /* 0x97 offset in data sheet */
#define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */
@@ -91,7 +92,7 @@
#define CLKID_CPU_CLK_DIV6 120
#define CLKID_CPU_CLK_DIV7 121
#define CLKID_CPU_CLK_DIV8 122
-#define CLKID_ABP_SEL 123
+#define CLKID_APB_SEL 123
#define CLKID_PERIPH_SEL 125
#define CLKID_AXI_SEL 127
#define CLKID_L2_DRAM_SEL 129
@@ -139,8 +140,14 @@
#define CLKID_HDMI_SYS_SEL 172
#define CLKID_HDMI_SYS_DIV 173
#define CLKID_HDMI_SYS 174
+#define CLKID_MALI_0_SEL 175
+#define CLKID_MALI_0_DIV 176
+#define CLKID_MALI_0 177
+#define CLKID_MALI_1_SEL 178
+#define CLKID_MALI_1_DIV 179
+#define CLKID_MALI_1 180
-#define CLK_NR_CLKS 175
+#define CLK_NR_CLKS 181
/*
* include the CLKID and RESETID that have
diff --git a/drivers/clk/meson/parm.h b/drivers/clk/meson/parm.h
new file mode 100644
index 000000000000..3c9ef1b505ce
--- /dev/null
+++ b/drivers/clk/meson/parm.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2015 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ */
+
+#ifndef __MESON_PARM_H
+#define __MESON_PARM_H
+
+#include <linux/bits.h>
+#include <linux/regmap.h>
+
+#define PMASK(width) GENMASK(width - 1, 0)
+#define SETPMASK(width, shift) GENMASK(shift + width - 1, shift)
+#define CLRPMASK(width, shift) (~SETPMASK(width, shift))
+
+#define PARM_GET(width, shift, reg) \
+ (((reg) & SETPMASK(width, shift)) >> (shift))
+#define PARM_SET(width, shift, reg, val) \
+ (((reg) & CLRPMASK(width, shift)) | ((val) << (shift)))
+
+#define MESON_PARM_APPLICABLE(p) (!!((p)->width))
+
+struct parm {
+ u16 reg_off;
+ u8 shift;
+ u8 width;
+};
+
+static inline unsigned int meson_parm_read(struct regmap *map, struct parm *p)
+{
+ unsigned int val;
+
+ regmap_read(map, p->reg_off, &val);
+ return PARM_GET(p->width, p->shift, val);
+}
+
+static inline void meson_parm_write(struct regmap *map, struct parm *p,
+ unsigned int val)
+{
+ regmap_update_bits(map, p->reg_off, SETPMASK(p->width, p->shift),
+ val << p->shift);
+}
+
+#endif /* __MESON_PARM_H */
+
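
Reviewer note: a small usage sketch for the parm helpers above. The register offset and field geometry are invented; the helpers behave exactly as defined in this header.

static struct parm example_field = { .reg_off = 0x280, .shift = 16, .width = 9 };

static void example_bump_field(struct regmap *map)
{
	unsigned int val = meson_parm_read(map, &example_field); /* extract the 9-bit field */

	meson_parm_write(map, &example_field, val + 1);          /* write it back, other bits untouched */
}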
diff --git a/drivers/clk/meson/sclk-div.c b/drivers/clk/meson/sclk-div.c
index bc64019b8eeb..3acf03780221 100644
--- a/drivers/clk/meson/sclk-div.c
+++ b/drivers/clk/meson/sclk-div.c
@@ -16,7 +16,11 @@
* duty_cycle = (1 + hi) / (1 + val)
*/
-#include "clkc-audio.h"
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+
+#include "clk-regmap.h"
+#include "sclk-div.h"
static inline struct meson_sclk_div_data *
meson_sclk_div_data(struct clk_regmap *clk)
@@ -241,3 +245,7 @@ const struct clk_ops meson_sclk_div_ops = {
.init = sclk_div_init,
};
EXPORT_SYMBOL_GPL(meson_sclk_div_ops);
+
+MODULE_DESCRIPTION("Amlogic Sample divider driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
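
Reviewer note: the duty-cycle relation quoted at the top of sclk-div.c, duty_cycle = (1 + hi) / (1 + val), is easiest to read with numbers: hi = 4 and val = 9 give (1 + 4) / (1 + 9) = 50% duty at a divide-by-10 setting (illustrative values only).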
diff --git a/drivers/clk/meson/clkc-audio.h b/drivers/clk/meson/sclk-div.h
index 0a7c157ebf81..b64b2a32005f 100644
--- a/drivers/clk/meson/clkc-audio.h
+++ b/drivers/clk/meson/sclk-div.h
@@ -4,16 +4,11 @@
* Author: Jerome Brunet <jbrunet@baylibre.com>
*/
-#ifndef __MESON_CLKC_AUDIO_H
-#define __MESON_CLKC_AUDIO_H
+#ifndef __MESON_SCLK_DIV_H
+#define __MESON_SCLK_DIV_H
-#include "clkc.h"
-
-struct meson_clk_triphase_data {
- struct parm ph0;
- struct parm ph1;
- struct parm ph2;
-};
+#include <linux/clk-provider.h>
+#include "parm.h"
struct meson_sclk_div_data {
struct parm div;
@@ -22,7 +17,6 @@ struct meson_sclk_div_data {
struct clk_duty cached_duty;
};
-extern const struct clk_ops meson_clk_triphase_ops;
extern const struct clk_ops meson_sclk_div_ops;
-#endif /* __MESON_CLKC_AUDIO_H */
+#endif /* __MESON_SCLK_DIV_H */
diff --git a/drivers/clk/meson/vid-pll-div.c b/drivers/clk/meson/vid-pll-div.c
index 88af0e282ea0..08bcc01c0923 100644
--- a/drivers/clk/meson/vid-pll-div.c
+++ b/drivers/clk/meson/vid-pll-div.c
@@ -5,7 +5,10 @@
*/
#include <linux/clk-provider.h>
-#include "clkc.h"
+#include <linux/module.h>
+
+#include "clk-regmap.h"
+#include "vid-pll-div.h"
static inline struct meson_vid_pll_div_data *
meson_vid_pll_div_data(struct clk_regmap *clk)
@@ -89,3 +92,8 @@ static unsigned long meson_vid_pll_div_recalc_rate(struct clk_hw *hw,
const struct clk_ops meson_vid_pll_div_ro_ops = {
.recalc_rate = meson_vid_pll_div_recalc_rate,
};
+EXPORT_SYMBOL_GPL(meson_vid_pll_div_ro_ops);
+
+MODULE_DESCRIPTION("Amlogic video pll divider driver");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/vid-pll-div.h b/drivers/clk/meson/vid-pll-div.h
new file mode 100644
index 000000000000..c0128e33ccf9
--- /dev/null
+++ b/drivers/clk/meson/vid-pll-div.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef __MESON_VID_PLL_DIV_H
+#define __MESON_VID_PLL_DIV_H
+
+#include <linux/clk-provider.h>
+#include "parm.h"
+
+struct meson_vid_pll_div_data {
+ struct parm val;
+ struct parm sel;
+};
+
+extern const struct clk_ops meson_vid_pll_div_ro_ops;
+
+#endif /* __MESON_VID_PLL_DIV_H */
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index d083b860f083..a60a1be937ad 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -229,9 +229,10 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = {
{MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
{MMP2_CLK_SDH2, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
{MMP2_CLK_SDH3, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
- {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock},
+ {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x09, 0x09, 0x0, 0, &disp0_lock},
+ {MMP2_CLK_DISP0_LCDC, "disp0_lcdc_clk", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 0x12, 0x12, 0x0, 0, &disp0_lock},
{MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock},
- {MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x1b, 0x1b, 0x0, 0, &disp1_lock},
+ {MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x09, 0x09, 0x0, 0, &disp1_lock},
{MMP2_CLK_CCIC_ARBITER, "ccic_arbiter", "vctcxo", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1800, 0x1800, 0x0, 0, &ccic0_lock},
{MMP2_CLK_CCIC0, "ccic0_clk", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1b, 0x1b, 0x0, 0, &ccic0_lock},
{MMP2_CLK_CCIC0_PHY, "ccic0_phy_clk", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x24, 0x24, 0x0, 0, &ccic0_lock},
diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c
index 7dedfaa6e152..5c6bbee396b3 100644
--- a/drivers/clk/mvebu/armada-370.c
+++ b/drivers/clk/mvebu/armada-370.c
@@ -175,8 +175,10 @@ static void __init a370_clk_init(struct device_node *np)
mvebu_coreclk_setup(np, &a370_coreclks);
- if (cgnp)
+ if (cgnp) {
mvebu_clk_gating_setup(cgnp, a370_gating_desc);
+ of_node_put(cgnp);
+ }
}
CLK_OF_DECLARE(a370_clk, "marvell,armada-370-core-clock", a370_clk_init);
diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c
index e8f03293ec83..fa1568279c23 100644
--- a/drivers/clk/mvebu/armada-xp.c
+++ b/drivers/clk/mvebu/armada-xp.c
@@ -226,7 +226,9 @@ static void __init axp_clk_init(struct device_node *np)
mvebu_coreclk_setup(np, &axp_coreclks);
- if (cgnp)
+ if (cgnp) {
mvebu_clk_gating_setup(cgnp, axp_gating_desc);
+ of_node_put(cgnp);
+ }
}
CLK_OF_DECLARE(axp_clk, "marvell,armada-xp-core-clock", axp_clk_init);
diff --git a/drivers/clk/mvebu/dove.c b/drivers/clk/mvebu/dove.c
index e0dd99f36bf4..0bd09d33f9cf 100644
--- a/drivers/clk/mvebu/dove.c
+++ b/drivers/clk/mvebu/dove.c
@@ -188,10 +188,14 @@ static void __init dove_clk_init(struct device_node *np)
mvebu_coreclk_setup(np, &dove_coreclks);
- if (ddnp)
+ if (ddnp) {
dove_divider_clk_init(ddnp);
+ of_node_put(ddnp);
+ }
- if (cgnp)
+ if (cgnp) {
mvebu_clk_gating_setup(cgnp, dove_gating_desc);
+ of_node_put(cgnp);
+ }
}
CLK_OF_DECLARE(dove_clk, "marvell,dove-core-clock", dove_clk_init);
diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c
index 6f784167bda4..35af3aa18f1c 100644
--- a/drivers/clk/mvebu/kirkwood.c
+++ b/drivers/clk/mvebu/kirkwood.c
@@ -331,6 +331,8 @@ static void __init kirkwood_clk_init(struct device_node *np)
if (cgnp) {
mvebu_clk_gating_setup(cgnp, kirkwood_gating_desc);
kirkwood_clk_muxing_setup(cgnp, kirkwood_mux_desc);
+
+ of_node_put(cgnp);
}
}
CLK_OF_DECLARE(kirkwood_clk, "marvell,kirkwood-core-clock",
diff --git a/drivers/clk/mvebu/mv98dx3236.c b/drivers/clk/mvebu/mv98dx3236.c
index 0a74cf7a7725..1c8ab4f834ba 100644
--- a/drivers/clk/mvebu/mv98dx3236.c
+++ b/drivers/clk/mvebu/mv98dx3236.c
@@ -172,7 +172,9 @@ static void __init mv98dx3236_clk_init(struct device_node *np)
mvebu_coreclk_setup(np, &mv98dx3236_core_clocks);
- if (cgnp)
+ if (cgnp) {
mvebu_clk_gating_setup(cgnp, mv98dx3236_gating_desc);
+ of_node_put(cgnp);
+ }
}
CLK_OF_DECLARE(mv98dx3236_clk, "marvell,mv98dx3236-core-clock", mv98dx3236_clk_init);
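
Reviewer note: the of_node_put() additions across these mvebu drivers all fix the same leak. The gating-clock node typically comes from of_find_compatible_node(), which returns it with an elevated refcount, so the reference must be dropped once setup is done. A generic sketch of the pattern (the compatible string and descriptor name are placeholders):

struct device_node *np;

np = of_find_compatible_node(NULL, NULL, "vendor,example-gating-clock");
if (np) {
	mvebu_clk_gating_setup(np, example_gating_desc);
	of_node_put(np);	/* drop the reference taken by the lookup */
}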
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index e5eca8a1abe4..c25b57c3cbc8 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -71,7 +71,6 @@ struct src_sel {
* @freq_tbl: frequency table
* @clkr: regmap clock handle
* @lock: register lock
- *
*/
struct clk_rcg {
u32 ns_reg;
@@ -107,7 +106,6 @@ extern const struct clk_ops clk_rcg_lcc_ops;
* @freq_tbl: frequency table
* @clkr: regmap clock handle
* @lock: register lock
- *
*/
struct clk_dyn_rcg {
u32 ns_reg[2];
@@ -140,7 +138,7 @@ extern const struct clk_ops clk_dyn_rcg_ops;
* @parent_map: map from software's parent index to hardware's src_sel field
* @freq_tbl: frequency table
* @clkr: regmap clock handle
- *
+ * @cfg_off: additional offset, relative to cmd_rcgr, applied to the CFG/M/N/D registers
*/
struct clk_rcg2 {
u32 cmd_rcgr;
@@ -150,6 +148,7 @@ struct clk_rcg2 {
const struct parent_map *parent_map;
const struct freq_tbl *freq_tbl;
struct clk_regmap clkr;
+ u8 cfg_off;
};
#define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 6e3bd195d012..8c02bffe50df 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -41,6 +41,11 @@
#define N_REG 0xc
#define D_REG 0x10
+#define RCG_CFG_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + CFG_REG)
+#define RCG_M_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + M_REG)
+#define RCG_N_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + N_REG)
+#define RCG_D_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + D_REG)
+
/* Dynamic Frequency Scaling */
#define MAX_PERF_LEVEL 8
#define SE_CMD_DFSR_OFFSET 0x14
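
Reviewer note, with invented register values: for an RCG with cmd_rcgr = 0x1000 and cfg_off = 0x20, RCG_CFG_OFFSET() now resolves to 0x1000 + 0x20 + CFG_REG, while an RCG that leaves cfg_off at 0 keeps the historic cmd_rcgr + CFG_REG address, so existing users are unaffected by the new field.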
@@ -74,7 +79,7 @@ static u8 clk_rcg2_get_parent(struct clk_hw *hw)
u32 cfg;
int i, ret;
- ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
if (ret)
goto err;
@@ -123,7 +128,7 @@ static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
int ret;
u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
- ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ ret = regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
CFG_SRC_SEL_MASK, cfg);
if (ret)
return ret;
@@ -162,13 +167,13 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;
- regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
if (rcg->mnd_width) {
mask = BIT(rcg->mnd_width) - 1;
- regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + M_REG, &m);
+ regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
m &= mask;
- regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + N_REG, &n);
+ regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
n = ~n;
n &= mask;
n += m;
@@ -263,17 +268,17 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
if (rcg->mnd_width && f->n) {
mask = BIT(rcg->mnd_width) - 1;
ret = regmap_update_bits(rcg->clkr.regmap,
- rcg->cmd_rcgr + M_REG, mask, f->m);
+ RCG_M_OFFSET(rcg), mask, f->m);
if (ret)
return ret;
ret = regmap_update_bits(rcg->clkr.regmap,
- rcg->cmd_rcgr + N_REG, mask, ~(f->n - f->m));
+ RCG_N_OFFSET(rcg), mask, ~(f->n - f->m));
if (ret)
return ret;
ret = regmap_update_bits(rcg->clkr.regmap,
- rcg->cmd_rcgr + D_REG, mask, ~f->n);
+ RCG_D_OFFSET(rcg), mask, ~f->n);
if (ret)
return ret;
}
@@ -284,8 +289,7 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
if (rcg->mnd_width && f->n && (f->m != f->n))
cfg |= CFG_MODE_DUAL_EDGE;
-
- return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ return regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
mask, cfg);
}
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 9f4fc7773fb2..c3fd632af119 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -18,6 +18,31 @@
#define CLK_RPMH_ARC_EN_OFFSET 0
#define CLK_RPMH_VRM_EN_OFFSET 4
+#define BCM_TCS_CMD_COMMIT_MASK 0x40000000
+#define BCM_TCS_CMD_VALID_SHIFT 29
+#define BCM_TCS_CMD_VOTE_MASK 0x3fff
+#define BCM_TCS_CMD_VOTE_SHIFT 0
+
+#define BCM_TCS_CMD(valid, vote) \
+ (BCM_TCS_CMD_COMMIT_MASK | \
+ ((valid) << BCM_TCS_CMD_VALID_SHIFT) | \
+ ((vote & BCM_TCS_CMD_VOTE_MASK) \
+ << BCM_TCS_CMD_VOTE_SHIFT))
+
+/**
+ * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager (BCM)
+ * @unit: divisor used to convert Hz value to an RPMh msg
+ * @width: multiplier used to convert Hz value to an RPMh msg
+ * @vcd: virtual clock domain that this bcm belongs to
+ * @reserved: reserved to pad the struct
+ */
+struct bcm_db {
+ __le32 unit;
+ __le16 width;
+ u8 vcd;
+ u8 reserved;
+};
+
/**
* struct clk_rpmh - individual rpmh clock data structure
* @hw: handle between common and hardware-specific interfaces
@@ -29,6 +54,7 @@
* @aggr_state: rpmh clock aggregated state
* @last_sent_aggr_state: rpmh clock last aggr state sent to RPMh
* @valid_state_mask: mask to determine the state of the rpmh clock
+ * @unit: divisor used to convert rate to an RPMh msg, in magnitudes of kHz
* @dev: device to which it is attached
* @peer: pointer to the clock rpmh sibling
*/
@@ -42,6 +68,7 @@ struct clk_rpmh {
u32 aggr_state;
u32 last_sent_aggr_state;
u32 valid_state_mask;
+ u32 unit;
struct device *dev;
struct clk_rpmh *peer;
};
@@ -98,6 +125,17 @@ static DEFINE_MUTEX(rpmh_clk_lock);
__DEFINE_CLK_RPMH(_platform, _name, _name_active, _res_name, \
CLK_RPMH_VRM_EN_OFFSET, 1, _div)
+#define DEFINE_CLK_RPMH_BCM(_platform, _name, _res_name) \
+ static struct clk_rpmh _platform##_##_name = { \
+ .res_name = _res_name, \
+ .valid_state_mask = BIT(RPMH_ACTIVE_ONLY_STATE), \
+ .div = 1, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpmh_bcm_ops, \
+ .name = #_name, \
+ }, \
+ }
+
static inline struct clk_rpmh *to_clk_rpmh(struct clk_hw *_hw)
{
return container_of(_hw, struct clk_rpmh, hw);
@@ -210,6 +248,96 @@ static const struct clk_ops clk_rpmh_ops = {
.recalc_rate = clk_rpmh_recalc_rate,
};
+static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable)
+{
+ struct tcs_cmd cmd = { 0 };
+ u32 cmd_state;
+ int ret;
+
+ mutex_lock(&rpmh_clk_lock);
+
+ cmd_state = 0;
+ if (enable) {
+ cmd_state = 1;
+ if (c->aggr_state)
+ cmd_state = c->aggr_state;
+ }
+
+ if (c->last_sent_aggr_state == cmd_state) {
+ mutex_unlock(&rpmh_clk_lock);
+ return 0;
+ }
+
+ cmd.addr = c->res_addr;
+ cmd.data = BCM_TCS_CMD(enable, cmd_state);
+
+ ret = rpmh_write_async(c->dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
+ if (ret) {
+ dev_err(c->dev, "set active state of %s failed: (%d)\n",
+ c->res_name, ret);
+ mutex_unlock(&rpmh_clk_lock);
+ return ret;
+ }
+
+ c->last_sent_aggr_state = cmd_state;
+
+ mutex_unlock(&rpmh_clk_lock);
+
+ return 0;
+}
+
+static int clk_rpmh_bcm_prepare(struct clk_hw *hw)
+{
+ struct clk_rpmh *c = to_clk_rpmh(hw);
+
+ return clk_rpmh_bcm_send_cmd(c, true);
+};
+
+static void clk_rpmh_bcm_unprepare(struct clk_hw *hw)
+{
+ struct clk_rpmh *c = to_clk_rpmh(hw);
+
+ clk_rpmh_bcm_send_cmd(c, false);
+};
+
+static int clk_rpmh_bcm_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rpmh *c = to_clk_rpmh(hw);
+
+ c->aggr_state = rate / c->unit;
+ /*
+ * Since any non-zero value sent to hw would result in enabling the
+ * clock, only send the value if the clock has already been prepared.
+ */
+ if (clk_hw_is_prepared(hw))
+ clk_rpmh_bcm_send_cmd(c, true);
+
+ return 0;
+};
+
+static long clk_rpmh_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ return rate;
+}
+
+static unsigned long clk_rpmh_bcm_recalc_rate(struct clk_hw *hw,
+ unsigned long prate)
+{
+ struct clk_rpmh *c = to_clk_rpmh(hw);
+
+ return c->aggr_state * c->unit;
+}
+
+static const struct clk_ops clk_rpmh_bcm_ops = {
+ .prepare = clk_rpmh_bcm_prepare,
+ .unprepare = clk_rpmh_bcm_unprepare,
+ .set_rate = clk_rpmh_bcm_set_rate,
+ .round_rate = clk_rpmh_round_rate,
+ .recalc_rate = clk_rpmh_bcm_recalc_rate,
+};
+
/* Resource name must match resource id present in cmd-db. */
DEFINE_CLK_RPMH_ARC(sdm845, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 2);
DEFINE_CLK_RPMH_VRM(sdm845, ln_bb_clk2, ln_bb_clk2_ao, "lnbclka2", 2);
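
Reviewer note, a hedged worked example of the new BCM vote path (numbers invented): if cmd-db reports unit = 1000 for the "IP0" resource, the probe stores unit = 1000 * 1000 = 1000000 Hz per vote step. A later clk_set_rate() to 150000000 Hz then yields aggr_state = 150000000 / 1000000 = 150, and BCM_TCS_CMD(1, 150) packs the commit bit, the valid bit (bit 29) and the 14-bit vote value 150 into the single TCS command word sent via rpmh_write_async().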
@@ -217,6 +345,7 @@ DEFINE_CLK_RPMH_VRM(sdm845, ln_bb_clk3, ln_bb_clk3_ao, "lnbclka3", 2);
DEFINE_CLK_RPMH_VRM(sdm845, rf_clk1, rf_clk1_ao, "rfclka1", 1);
DEFINE_CLK_RPMH_VRM(sdm845, rf_clk2, rf_clk2_ao, "rfclka2", 1);
DEFINE_CLK_RPMH_VRM(sdm845, rf_clk3, rf_clk3_ao, "rfclka3", 1);
+DEFINE_CLK_RPMH_BCM(sdm845, ipa, "IP0");
static struct clk_hw *sdm845_rpmh_clocks[] = {
[RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
@@ -231,6 +360,7 @@ static struct clk_hw *sdm845_rpmh_clocks[] = {
[RPMH_RF_CLK2_A] = &sdm845_rf_clk2_ao.hw,
[RPMH_RF_CLK3] = &sdm845_rf_clk3.hw,
[RPMH_RF_CLK3_A] = &sdm845_rf_clk3_ao.hw,
+ [RPMH_IPA_CLK] = &sdm845_ipa.hw,
};
static const struct clk_rpmh_desc clk_rpmh_sdm845 = {
@@ -267,6 +397,8 @@ static int clk_rpmh_probe(struct platform_device *pdev)
for (i = 0; i < desc->num_clks; i++) {
u32 res_addr;
+ size_t aux_data_len;
+ const struct bcm_db *data;
rpmh_clk = to_clk_rpmh(hw_clks[i]);
res_addr = cmd_db_read_addr(rpmh_clk->res_name);
@@ -275,6 +407,20 @@ static int clk_rpmh_probe(struct platform_device *pdev)
rpmh_clk->res_name);
return -ENODEV;
}
+
+ data = cmd_db_read_aux_data(rpmh_clk->res_name, &aux_data_len);
+ if (IS_ERR(data)) {
+ ret = PTR_ERR(data);
+ dev_err(&pdev->dev,
+ "error reading RPMh aux data for %s (%d)\n",
+ rpmh_clk->res_name, ret);
+ return ret;
+ }
+
+ /* Convert unit from kHz to Hz */
+ if (aux_data_len == sizeof(*data))
+ rpmh_clk->unit = le32_to_cpu(data->unit) * 1000ULL;
+
rpmh_clk->res_addr += res_addr;
rpmh_clk->dev = &pdev->dev;
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index d3aadaeb2903..22dd42ad9223 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -655,10 +655,73 @@ static const struct rpm_smd_clk_desc rpm_clk_qcs404 = {
.num_clks = ARRAY_SIZE(qcs404_clks),
};
+/* msm8998 */
+DEFINE_CLK_SMD_RPM(msm8998, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
+DEFINE_CLK_SMD_RPM(msm8998, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
+DEFINE_CLK_SMD_RPM(msm8998, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, div_clk1, div_clk1_a, 0xb);
+DEFINE_CLK_SMD_RPM(msm8998, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, ln_bb_clk1, ln_bb_clk1_a, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, ln_bb_clk2, ln_bb_clk2_a, 2);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, ln_bb_clk3_pin, ln_bb_clk3_a_pin,
+ 3);
+DEFINE_CLK_SMD_RPM(msm8998, mmssnoc_axi_rpm_clk, mmssnoc_axi_rpm_a_clk,
+ QCOM_SMD_RPM_MMAXI_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8998, aggre1_noc_clk, aggre1_noc_a_clk,
+ QCOM_SMD_RPM_AGGR_CLK, 1);
+DEFINE_CLK_SMD_RPM(msm8998, aggre2_noc_clk, aggre2_noc_a_clk,
+ QCOM_SMD_RPM_AGGR_CLK, 2);
+DEFINE_CLK_SMD_RPM_QDSS(msm8998, qdss_clk, qdss_a_clk,
+ QCOM_SMD_RPM_MISC_CLK, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk1, rf_clk1_a, 4);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk2_pin, rf_clk2_a_pin, 5);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk3, rf_clk3_a, 6);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk3_pin, rf_clk3_a_pin, 6);
+static struct clk_smd_rpm *msm8998_clks[] = {
+ [RPM_SMD_SNOC_CLK] = &msm8998_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &msm8998_snoc_a_clk,
+ [RPM_SMD_CNOC_CLK] = &msm8998_cnoc_clk,
+ [RPM_SMD_CNOC_A_CLK] = &msm8998_cnoc_a_clk,
+ [RPM_SMD_CE1_CLK] = &msm8998_ce1_clk,
+ [RPM_SMD_CE1_A_CLK] = &msm8998_ce1_a_clk,
+ [RPM_SMD_DIV_CLK1] = &msm8998_div_clk1,
+ [RPM_SMD_DIV_A_CLK1] = &msm8998_div_clk1_a,
+ [RPM_SMD_IPA_CLK] = &msm8998_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &msm8998_ipa_a_clk,
+ [RPM_SMD_LN_BB_CLK1] = &msm8998_ln_bb_clk1,
+ [RPM_SMD_LN_BB_CLK1_A] = &msm8998_ln_bb_clk1_a,
+ [RPM_SMD_LN_BB_CLK2] = &msm8998_ln_bb_clk2,
+ [RPM_SMD_LN_BB_CLK2_A] = &msm8998_ln_bb_clk2_a,
+ [RPM_SMD_LN_BB_CLK3_PIN] = &msm8998_ln_bb_clk3_pin,
+ [RPM_SMD_LN_BB_CLK3_A_PIN] = &msm8998_ln_bb_clk3_a_pin,
+ [RPM_SMD_MMAXI_CLK] = &msm8998_mmssnoc_axi_rpm_clk,
+ [RPM_SMD_MMAXI_A_CLK] = &msm8998_mmssnoc_axi_rpm_a_clk,
+ [RPM_SMD_AGGR1_NOC_CLK] = &msm8998_aggre1_noc_clk,
+ [RPM_SMD_AGGR1_NOC_A_CLK] = &msm8998_aggre1_noc_a_clk,
+ [RPM_SMD_AGGR2_NOC_CLK] = &msm8998_aggre2_noc_clk,
+ [RPM_SMD_AGGR2_NOC_A_CLK] = &msm8998_aggre2_noc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &msm8998_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &msm8998_qdss_a_clk,
+ [RPM_SMD_RF_CLK1] = &msm8998_rf_clk1,
+ [RPM_SMD_RF_CLK1_A] = &msm8998_rf_clk1_a,
+ [RPM_SMD_RF_CLK2_PIN] = &msm8998_rf_clk2_pin,
+ [RPM_SMD_RF_CLK2_A_PIN] = &msm8998_rf_clk2_a_pin,
+ [RPM_SMD_RF_CLK3] = &msm8998_rf_clk3,
+ [RPM_SMD_RF_CLK3_A] = &msm8998_rf_clk3_a,
+ [RPM_SMD_RF_CLK3_PIN] = &msm8998_rf_clk3_pin,
+ [RPM_SMD_RF_CLK3_A_PIN] = &msm8998_rf_clk3_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8998 = {
+ .clks = msm8998_clks,
+ .num_clks = ARRAY_SIZE(msm8998_clks),
+};
+
static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 },
{ .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 },
{ .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996 },
+ { .compatible = "qcom,rpmcc-msm8998", .data = &rpm_clk_msm8998 },
{ .compatible = "qcom,rpmcc-qcs404", .data = &rpm_clk_qcs404 },
{ }
};
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 0a48ed56833b..a6b2f86112d8 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -231,6 +231,8 @@ int qcom_cc_really_probe(struct platform_device *pdev,
struct gdsc_desc *scd;
size_t num_clks = desc->num_clks;
struct clk_regmap **rclks = desc->clks;
+ size_t num_clk_hws = desc->num_clk_hws;
+ struct clk_hw **clk_hws = desc->clk_hws;
cc = devm_kzalloc(dev, sizeof(*cc), GFP_KERNEL);
if (!cc)
@@ -269,6 +271,12 @@ int qcom_cc_really_probe(struct platform_device *pdev,
qcom_cc_drop_protected(dev, cc);
+ for (i = 0; i < num_clk_hws; i++) {
+ ret = devm_clk_hw_register(dev, clk_hws[i]);
+ if (ret)
+ return ret;
+ }
+
for (i = 0; i < num_clks; i++) {
if (!rclks[i])
continue;
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index 4aa33ee70bae..1e2a8bdac55a 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -27,6 +27,8 @@ struct qcom_cc_desc {
size_t num_resets;
struct gdsc **gdscs;
size_t num_gdscs;
+ struct clk_hw **clk_hws;
+ size_t num_clk_hws;
};
/**
diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
index 505c6263141d..0e32892b438c 100644
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -4715,18 +4715,12 @@ static const struct qcom_cc_desc gcc_ipq8074_desc = {
.num_clks = ARRAY_SIZE(gcc_ipq8074_clks),
.resets = gcc_ipq8074_resets,
.num_resets = ARRAY_SIZE(gcc_ipq8074_resets),
+ .clk_hws = gcc_ipq8074_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_ipq8074_hws),
};
static int gcc_ipq8074_probe(struct platform_device *pdev)
{
- int ret, i;
-
- for (i = 0; i < ARRAY_SIZE(gcc_ipq8074_hws); i++) {
- ret = devm_clk_hw_register(&pdev->dev, gcc_ipq8074_hws[i]);
- if (ret)
- return ret;
- }
-
return qcom_cc_probe(pdev, &gcc_ipq8074_desc);
}
diff --git a/drivers/clk/qcom/gcc-mdm9615.c b/drivers/clk/qcom/gcc-mdm9615.c
index 849046fbed6d..8c6d93144b9c 100644
--- a/drivers/clk/qcom/gcc-mdm9615.c
+++ b/drivers/clk/qcom/gcc-mdm9615.c
@@ -1702,6 +1702,8 @@ static const struct qcom_cc_desc gcc_mdm9615_desc = {
.num_clks = ARRAY_SIZE(gcc_mdm9615_clks),
.resets = gcc_mdm9615_resets,
.num_resets = ARRAY_SIZE(gcc_mdm9615_resets),
+ .clk_hws = gcc_mdm9615_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_mdm9615_hws),
};
static const struct of_device_id gcc_mdm9615_match_table[] = {
@@ -1712,21 +1714,12 @@ MODULE_DEVICE_TABLE(of, gcc_mdm9615_match_table);
static int gcc_mdm9615_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
struct regmap *regmap;
- int ret;
- int i;
regmap = qcom_cc_map(pdev, &gcc_mdm9615_desc);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- for (i = 0; i < ARRAY_SIZE(gcc_mdm9615_hws); i++) {
- ret = devm_clk_hw_register(dev, gcc_mdm9615_hws[i]);
- if (ret)
- return ret;
- }
-
return qcom_cc_really_probe(pdev, &gcc_mdm9615_desc, regmap);
}
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index 9d136172c27c..4632b9272b7f 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -3656,6 +3656,8 @@ static const struct qcom_cc_desc gcc_msm8996_desc = {
.num_resets = ARRAY_SIZE(gcc_msm8996_resets),
.gdscs = gcc_msm8996_gdscs,
.num_gdscs = ARRAY_SIZE(gcc_msm8996_gdscs),
+ .clk_hws = gcc_msm8996_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_msm8996_hws),
};
static const struct of_device_id gcc_msm8996_match_table[] = {
@@ -3666,8 +3668,6 @@ MODULE_DEVICE_TABLE(of, gcc_msm8996_match_table);
static int gcc_msm8996_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- int i, ret;
struct regmap *regmap;
regmap = qcom_cc_map(pdev, &gcc_msm8996_desc);
@@ -3680,12 +3680,6 @@ static int gcc_msm8996_probe(struct platform_device *pdev)
*/
regmap_update_bits(regmap, 0x52008, BIT(21), BIT(21));
- for (i = 0; i < ARRAY_SIZE(gcc_msm8996_hws); i++) {
- ret = devm_clk_hw_register(dev, gcc_msm8996_hws[i]);
- if (ret)
- return ret;
- }
-
return qcom_cc_really_probe(pdev, &gcc_msm8996_desc, regmap);
}
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index 1b779396e04f..c240fba794c7 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -1112,6 +1112,7 @@ static struct clk_rcg2 ufs_axi_clk_src = {
static const struct freq_tbl ftbl_usb30_master_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
F(120000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
{ }
@@ -1189,6 +1190,7 @@ static struct clk_branch gcc_aggre1_ufs_axi_clk = {
"ufs_axi_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1206,6 +1208,7 @@ static struct clk_branch gcc_aggre1_usb3_axi_clk = {
"usb30_master_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1288,6 +1291,7 @@ static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
"blsp1_qup1_i2c_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1305,6 +1309,7 @@ static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
"blsp1_qup1_spi_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1322,6 +1327,7 @@ static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
"blsp1_qup2_i2c_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1339,6 +1345,7 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
"blsp1_qup2_spi_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1356,6 +1363,7 @@ static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
"blsp1_qup3_i2c_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1373,6 +1381,7 @@ static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
"blsp1_qup3_spi_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1390,6 +1399,7 @@ static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
"blsp1_qup4_i2c_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1407,6 +1417,7 @@ static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
"blsp1_qup4_spi_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1424,6 +1435,7 @@ static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
"blsp1_qup5_i2c_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1441,6 +1453,7 @@ static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
"blsp1_qup5_spi_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1458,6 +1471,7 @@ static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
"blsp1_qup6_i2c_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1475,6 +1489,7 @@ static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
"blsp1_qup6_spi_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1505,6 +1520,7 @@ static struct clk_branch gcc_blsp1_uart1_apps_clk = {
"blsp1_uart1_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1522,6 +1538,7 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = {
"blsp1_uart2_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1539,6 +1556,7 @@ static struct clk_branch gcc_blsp1_uart3_apps_clk = {
"blsp1_uart3_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1569,6 +1587,7 @@ static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
"blsp2_qup1_i2c_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1586,6 +1605,7 @@ static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
"blsp2_qup1_spi_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1603,6 +1623,7 @@ static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
"blsp2_qup2_i2c_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1620,6 +1641,7 @@ static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
"blsp2_qup2_spi_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1637,6 +1659,7 @@ static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
"blsp2_qup3_i2c_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1654,6 +1677,7 @@ static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
"blsp2_qup3_spi_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1671,6 +1695,7 @@ static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
"blsp2_qup4_i2c_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1688,6 +1713,7 @@ static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
"blsp2_qup4_spi_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1705,6 +1731,7 @@ static struct clk_branch gcc_blsp2_qup5_i2c_apps_clk = {
"blsp2_qup5_i2c_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1722,6 +1749,7 @@ static struct clk_branch gcc_blsp2_qup5_spi_apps_clk = {
"blsp2_qup5_spi_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1739,6 +1767,7 @@ static struct clk_branch gcc_blsp2_qup6_i2c_apps_clk = {
"blsp2_qup6_i2c_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1756,6 +1785,7 @@ static struct clk_branch gcc_blsp2_qup6_spi_apps_clk = {
"blsp2_qup6_spi_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1786,6 +1816,7 @@ static struct clk_branch gcc_blsp2_uart1_apps_clk = {
"blsp2_uart1_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1803,6 +1834,7 @@ static struct clk_branch gcc_blsp2_uart2_apps_clk = {
"blsp2_uart2_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1820,6 +1852,7 @@ static struct clk_branch gcc_blsp2_uart3_apps_clk = {
"blsp2_uart3_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1837,6 +1870,7 @@ static struct clk_branch gcc_cfg_noc_usb3_axi_clk = {
"usb30_master_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1854,6 +1888,7 @@ static struct clk_branch gcc_gp1_clk = {
"gp1_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1871,6 +1906,7 @@ static struct clk_branch gcc_gp2_clk = {
"gp2_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1888,6 +1924,7 @@ static struct clk_branch gcc_gp3_clk = {
"gp3_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1957,6 +1994,7 @@ static struct clk_branch gcc_hmss_ahb_clk = {
"hmss_ahb_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1987,6 +2025,7 @@ static struct clk_branch gcc_hmss_rbcpr_clk = {
"hmss_rbcpr_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2088,6 +2127,7 @@ static struct clk_branch gcc_pcie_0_aux_clk = {
"pcie_aux_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2157,6 +2197,7 @@ static struct clk_branch gcc_pcie_phy_aux_clk = {
"pcie_aux_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2174,6 +2215,7 @@ static struct clk_branch gcc_pdm2_clk = {
"pdm2_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2243,6 +2285,7 @@ static struct clk_branch gcc_sdcc2_apps_clk = {
"sdcc2_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2273,6 +2316,7 @@ static struct clk_branch gcc_sdcc4_apps_clk = {
"sdcc4_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2316,6 +2360,7 @@ static struct clk_branch gcc_tsif_ref_clk = {
"tsif_ref_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2346,6 +2391,7 @@ static struct clk_branch gcc_ufs_axi_clk = {
"ufs_axi_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2441,6 +2487,7 @@ static struct clk_branch gcc_usb30_master_clk = {
"usb30_master_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2458,6 +2505,7 @@ static struct clk_branch gcc_usb30_mock_utmi_clk = {
"usb30_mock_utmi_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2488,6 +2536,7 @@ static struct clk_branch gcc_usb3_phy_aux_clk = {
"usb3_phy_aux_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2495,7 +2544,7 @@ static struct clk_branch gcc_usb3_phy_aux_clk = {
static struct clk_branch gcc_usb3_phy_pipe_clk = {
.halt_reg = 0x50004,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x50004,
.enable_mask = BIT(0),
@@ -2910,6 +2959,10 @@ static const struct regmap_config gcc_msm8998_regmap_config = {
.fast_io = true,
};
+static struct clk_hw *gcc_msm8998_hws[] = {
+ &xo.hw,
+};
+
static const struct qcom_cc_desc gcc_msm8998_desc = {
.config = &gcc_msm8998_regmap_config,
.clks = gcc_msm8998_clocks,
@@ -2918,6 +2971,8 @@ static const struct qcom_cc_desc gcc_msm8998_desc = {
.num_resets = ARRAY_SIZE(gcc_msm8998_resets),
.gdscs = gcc_msm8998_gdscs,
.num_gdscs = ARRAY_SIZE(gcc_msm8998_gdscs),
+ .clk_hws = gcc_msm8998_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_msm8998_hws),
};
static int gcc_msm8998_probe(struct platform_device *pdev)
@@ -2937,10 +2992,6 @@ static int gcc_msm8998_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = devm_clk_hw_register(&pdev->dev, &xo.hw);
- if (ret)
- return ret;
-
return qcom_cc_really_probe(pdev, &gcc_msm8998_desc, regmap);
}
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
index 64da032bb9ed..5a62f64ada93 100644
--- a/drivers/clk/qcom/gcc-qcs404.c
+++ b/drivers/clk/qcom/gcc-qcs404.c
@@ -678,6 +678,7 @@ static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
.cmd_rcgr = 0x4014,
.mnd_width = 16,
.hid_width = 5,
+ .cfg_off = 0x20,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_blsp1_uart0_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
@@ -2692,6 +2693,8 @@ static const struct qcom_cc_desc gcc_qcs404_desc = {
.num_clks = ARRAY_SIZE(gcc_qcs404_clocks),
.resets = gcc_qcs404_resets,
.num_resets = ARRAY_SIZE(gcc_qcs404_resets),
+ .clk_hws = gcc_qcs404_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_qcs404_hws),
};
static const struct of_device_id gcc_qcs404_match_table[] = {
@@ -2703,7 +2706,6 @@ MODULE_DEVICE_TABLE(of, gcc_qcs404_match_table);
static int gcc_qcs404_probe(struct platform_device *pdev)
{
struct regmap *regmap;
- int ret, i;
regmap = qcom_cc_map(pdev, &gcc_qcs404_desc);
if (IS_ERR(regmap))
@@ -2711,12 +2713,6 @@ static int gcc_qcs404_probe(struct platform_device *pdev)
clk_alpha_pll_configure(&gpll3_out_main, regmap, &gpll3_config);
- for (i = 0; i < ARRAY_SIZE(gcc_qcs404_hws); i++) {
- ret = devm_clk_hw_register(&pdev->dev, gcc_qcs404_hws[i]);
- if (ret)
- return ret;
- }
-
return qcom_cc_really_probe(pdev, &gcc_qcs404_desc, regmap);
}
diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
index ba239ea4c842..8827db23066f 100644
--- a/drivers/clk/qcom/gcc-sdm660.c
+++ b/drivers/clk/qcom/gcc-sdm660.c
@@ -2420,6 +2420,8 @@ static const struct qcom_cc_desc gcc_sdm660_desc = {
.num_resets = ARRAY_SIZE(gcc_sdm660_resets),
.gdscs = gcc_sdm660_gdscs,
.num_gdscs = ARRAY_SIZE(gcc_sdm660_gdscs),
+ .clk_hws = gcc_sdm660_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_sdm660_hws),
};
static const struct of_device_id gcc_sdm660_match_table[] = {
@@ -2431,7 +2433,7 @@ MODULE_DEVICE_TABLE(of, gcc_sdm660_match_table);
static int gcc_sdm660_probe(struct platform_device *pdev)
{
- int i, ret;
+ int ret;
struct regmap *regmap;
regmap = qcom_cc_map(pdev, &gcc_sdm660_desc);
@@ -2446,13 +2448,6 @@ static int gcc_sdm660_probe(struct platform_device *pdev)
if (ret)
return ret;
- /* Register the hws */
- for (i = 0; i < ARRAY_SIZE(gcc_sdm660_hws); i++) {
- ret = devm_clk_hw_register(&pdev->dev, gcc_sdm660_hws[i]);
- if (ret)
- return ret;
- }
-
return qcom_cc_really_probe(pdev, &gcc_sdm660_desc, regmap);
}
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 58fa5c247af1..7131dcf9b060 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -1703,6 +1703,9 @@ static struct clk_branch gcc_pcie_0_pipe_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_pipe_clk",
+ .parent_names = (const char *[]){ "pcie_0_pipe_clk" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1802,6 +1805,8 @@ static struct clk_branch gcc_pcie_1_pipe_clk = {
.enable_mask = BIT(30),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_pipe_clk",
+ .parent_names = (const char *[]){ "pcie_1_pipe_clk" },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 7d4ee109435c..7235510eac94 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -3347,6 +3347,8 @@ static const struct qcom_cc_desc mmcc_msm8996_desc = {
.num_resets = ARRAY_SIZE(mmcc_msm8996_resets),
.gdscs = mmcc_msm8996_gdscs,
.num_gdscs = ARRAY_SIZE(mmcc_msm8996_gdscs),
+ .clk_hws = mmcc_msm8996_hws,
+ .num_clk_hws = ARRAY_SIZE(mmcc_msm8996_hws),
};
static const struct of_device_id mmcc_msm8996_match_table[] = {
@@ -3357,8 +3359,6 @@ MODULE_DEVICE_TABLE(of, mmcc_msm8996_match_table);
static int mmcc_msm8996_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- int i, ret;
struct regmap *regmap;
regmap = qcom_cc_map(pdev, &mmcc_msm8996_desc);
@@ -3370,12 +3370,6 @@ static int mmcc_msm8996_probe(struct platform_device *pdev)
/* Disable the NoC FSM for mmss_mmagic_cfg_ahb_clk */
regmap_update_bits(regmap, 0x5054, BIT(15), 0);
- for (i = 0; i < ARRAY_SIZE(mmcc_msm8996_hws); i++) {
- ret = devm_clk_hw_register(dev, mmcc_msm8996_hws[i]);
- if (ret)
- return ret;
- }
-
return qcom_cc_really_probe(pdev, &mmcc_msm8996_desc, regmap);
}
diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
index 10e852518870..4d92b27a6153 100644
--- a/drivers/clk/renesas/r8a774a1-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
@@ -21,7 +21,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R8A774A1_CLK_OSC,
+ LAST_DT_CORE_CLK = R8A774A1_CLK_CANFD,
/* External Input Clocks */
CLK_EXTAL,
@@ -102,6 +102,7 @@ static const struct cpg_core_clk r8a774a1_core_clks[] __initconst = {
DEF_FIXED("cp", R8A774A1_CLK_CP, CLK_EXTAL, 2, 1),
DEF_FIXED("cpex", R8A774A1_CLK_CPEX, CLK_EXTAL, 2, 1),
+ DEF_DIV6P1("canfd", R8A774A1_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
DEF_DIV6P1("csi0", R8A774A1_CLK_CSI0, CLK_PLL1_DIV4, 0x00c),
DEF_DIV6P1("mso", R8A774A1_CLK_MSO, CLK_PLL1_DIV4, 0x014),
DEF_DIV6P1("hdmi", R8A774A1_CLK_HDMI, CLK_PLL1_DIV4, 0x250),
@@ -191,6 +192,7 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = {
DEF_MOD("gpio2", 910, R8A774A1_CLK_S3D4),
DEF_MOD("gpio1", 911, R8A774A1_CLK_S3D4),
DEF_MOD("gpio0", 912, R8A774A1_CLK_S3D4),
+ DEF_MOD("can-fd", 914, R8A774A1_CLK_S3D2),
DEF_MOD("can-if1", 915, R8A774A1_CLK_S3D4),
DEF_MOD("can-if0", 916, R8A774A1_CLK_S3D4),
DEF_MOD("i2c6", 918, R8A774A1_CLK_S0D6),
diff --git a/drivers/clk/renesas/r8a774c0-cpg-mssr.c b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
index 10b96895d452..34e274f2a273 100644
--- a/drivers/clk/renesas/r8a774c0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
@@ -22,7 +22,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R8A774C0_CLK_CPEX,
+ LAST_DT_CORE_CLK = R8A774C0_CLK_CANFD,
/* External Input Clocks */
CLK_EXTAL,
@@ -33,6 +33,7 @@ enum clk_ids {
CLK_PLL1,
CLK_PLL3,
CLK_PLL0D4,
+ CLK_PLL0D6,
CLK_PLL0D8,
CLK_PLL0D20,
CLK_PLL0D24,
@@ -61,6 +62,7 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = {
DEF_FIXED(".pll0", CLK_PLL0, CLK_MAIN, 1, 100),
DEF_FIXED(".pll0d4", CLK_PLL0D4, CLK_PLL0, 4, 1),
+ DEF_FIXED(".pll0d6", CLK_PLL0D6, CLK_PLL0, 6, 1),
DEF_FIXED(".pll0d8", CLK_PLL0D8, CLK_PLL0, 8, 1),
DEF_FIXED(".pll0d20", CLK_PLL0D20, CLK_PLL0, 20, 1),
DEF_FIXED(".pll0d24", CLK_PLL0D24, CLK_PLL0, 24, 1),
@@ -112,6 +114,7 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = {
DEF_GEN3_PE("s3d2c", R8A774C0_CLK_S3D2C, CLK_S3, 2, CLK_PE, 2),
DEF_GEN3_PE("s3d4c", R8A774C0_CLK_S3D4C, CLK_S3, 4, CLK_PE, 4),
+ DEF_DIV6P1("canfd", R8A774C0_CLK_CANFD, CLK_PLL0D6, 0x244),
DEF_DIV6P1("csi0", R8A774C0_CLK_CSI0, CLK_PLL1D2, 0x00c),
DEF_DIV6P1("mso", R8A774C0_CLK_MSO, CLK_PLL1D2, 0x014),
@@ -119,6 +122,11 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = {
+ DEF_MOD("tmu4", 121, R8A774C0_CLK_S0D6C),
+ DEF_MOD("tmu3", 122, R8A774C0_CLK_S3D2C),
+ DEF_MOD("tmu2", 123, R8A774C0_CLK_S3D2C),
+ DEF_MOD("tmu1", 124, R8A774C0_CLK_S3D2C),
+ DEF_MOD("tmu0", 125, R8A774C0_CLK_CP),
DEF_MOD("scif5", 202, R8A774C0_CLK_S3D4C),
DEF_MOD("scif4", 203, R8A774C0_CLK_S3D4C),
DEF_MOD("scif3", 204, R8A774C0_CLK_S3D4C),
@@ -172,8 +180,8 @@ static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = {
DEF_MOD("ehci0", 703, R8A774C0_CLK_S3D4),
DEF_MOD("hsusb", 704, R8A774C0_CLK_S3D4),
DEF_MOD("csi40", 716, R8A774C0_CLK_CSI0),
- DEF_MOD("du1", 723, R8A774C0_CLK_S2D1),
- DEF_MOD("du0", 724, R8A774C0_CLK_S2D1),
+ DEF_MOD("du1", 723, R8A774C0_CLK_S1D1),
+ DEF_MOD("du0", 724, R8A774C0_CLK_S1D1),
DEF_MOD("lvds", 727, R8A774C0_CLK_S2D1),
DEF_MOD("vin5", 806, R8A774C0_CLK_S1D2),
@@ -187,6 +195,7 @@ static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = {
DEF_MOD("gpio2", 910, R8A774C0_CLK_S3D4),
DEF_MOD("gpio1", 911, R8A774C0_CLK_S3D4),
DEF_MOD("gpio0", 912, R8A774C0_CLK_S3D4),
+ DEF_MOD("can-fd", 914, R8A774C0_CLK_S3D2),
DEF_MOD("can-if1", 915, R8A774C0_CLK_S3D4),
DEF_MOD("can-if0", 916, R8A774C0_CLK_S3D4),
DEF_MOD("i2c6", 918, R8A774C0_CLK_S3D2),
diff --git a/drivers/clk/renesas/r8a77980-cpg-mssr.c b/drivers/clk/renesas/r8a77980-cpg-mssr.c
index 25a3083b6764..f9e07fcc0d96 100644
--- a/drivers/clk/renesas/r8a77980-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77980-cpg-mssr.c
@@ -41,6 +41,7 @@ enum clk_ids {
CLK_S2,
CLK_S3,
CLK_SDSRC,
+ CLK_RPCSRC,
CLK_OCO,
/* Module Clocks */
@@ -65,8 +66,14 @@ static const struct cpg_core_clk r8a77980_core_clks[] __initconst = {
DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
DEF_RATE(".oco", CLK_OCO, 32768),
+ DEF_BASE("rpc", R8A77980_CLK_RPC, CLK_TYPE_GEN3_RPC,
+ CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A77980_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
+ R8A77980_CLK_RPC),
+
/* Core Clock Outputs */
DEF_FIXED("ztr", R8A77980_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED("ztrd2", R8A77980_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
@@ -164,6 +171,7 @@ static const struct mssr_mod_clk r8a77980_mod_clks[] __initconst = {
DEF_MOD("gpio1", 911, R8A77980_CLK_CP),
DEF_MOD("gpio0", 912, R8A77980_CLK_CP),
DEF_MOD("can-fd", 914, R8A77980_CLK_S3D2),
+ DEF_MOD("rpc-if", 917, R8A77980_CLK_RPC),
DEF_MOD("i2c4", 927, R8A77980_CLK_S0D6),
DEF_MOD("i2c3", 928, R8A77980_CLK_S0D6),
DEF_MOD("i2c2", 929, R8A77980_CLK_S3D2),
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index be2ccbd6d623..9a8071a8114d 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -30,6 +30,21 @@
#define CPG_RCKCR_CKSEL BIT(15) /* RCLK Clock Source Select */
+static spinlock_t cpg_lock;
+
+static void cpg_reg_modify(void __iomem *reg, u32 clear, u32 set)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&cpg_lock, flags);
+ val = readl(reg);
+ val &= ~clear;
+ val |= set;
+ writel(val, reg);
+ spin_unlock_irqrestore(&cpg_lock, flags);
+}
+
struct cpg_simple_notifier {
struct notifier_block nb;
void __iomem *reg;
@@ -118,7 +133,6 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
struct cpg_z_clk *zclk = to_z_clk(hw);
unsigned int mult;
unsigned int i;
- u32 val, kick;
/* Factor of 2 is for fixed divider */
mult = DIV_ROUND_CLOSEST_ULL(rate * 32ULL * 2, parent_rate);
@@ -127,17 +141,14 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
return -EBUSY;
- val = readl(zclk->reg) & ~zclk->mask;
- val |= ((32 - mult) << __ffs(zclk->mask)) & zclk->mask;
- writel(val, zclk->reg);
+ cpg_reg_modify(zclk->reg, zclk->mask,
+ ((32 - mult) << __ffs(zclk->mask)) & zclk->mask);
/*
* Set KICK bit in FRQCRB to update hardware setting and wait for
* clock change completion.
*/
- kick = readl(zclk->kick_reg);
- kick |= CPG_FRQCRB_KICK;
- writel(kick, zclk->kick_reg);
+ cpg_reg_modify(zclk->kick_reg, 0, CPG_FRQCRB_KICK);
/*
* Note: There is no HW information about the worst case latency.
@@ -266,12 +277,10 @@ static const struct sd_div_table cpg_sd_div_table[] = {
static int cpg_sd_clock_enable(struct clk_hw *hw)
{
struct sd_clock *clock = to_sd_clock(hw);
- u32 val = readl(clock->csn.reg);
-
- val &= ~(CPG_SD_STP_MASK);
- val |= clock->div_table[clock->cur_div_idx].val & CPG_SD_STP_MASK;
- writel(val, clock->csn.reg);
+ cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK,
+ clock->div_table[clock->cur_div_idx].val &
+ CPG_SD_STP_MASK);
return 0;
}
@@ -280,7 +289,7 @@ static void cpg_sd_clock_disable(struct clk_hw *hw)
{
struct sd_clock *clock = to_sd_clock(hw);
- writel(readl(clock->csn.reg) | CPG_SD_STP_MASK, clock->csn.reg);
+ cpg_reg_modify(clock->csn.reg, 0, CPG_SD_STP_MASK);
}
static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
@@ -327,7 +336,6 @@ static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct sd_clock *clock = to_sd_clock(hw);
unsigned int div = cpg_sd_clock_calc_div(clock, rate, parent_rate);
- u32 val;
unsigned int i;
for (i = 0; i < clock->div_num; i++)
@@ -339,10 +347,9 @@ static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
clock->cur_div_idx = i;
- val = readl(clock->csn.reg);
- val &= ~(CPG_SD_STP_MASK | CPG_SD_FC_MASK);
- val |= clock->div_table[i].val & (CPG_SD_STP_MASK | CPG_SD_FC_MASK);
- writel(val, clock->csn.reg);
+ cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK | CPG_SD_FC_MASK,
+ clock->div_table[i].val &
+ (CPG_SD_STP_MASK | CPG_SD_FC_MASK));
return 0;
}
@@ -415,6 +422,92 @@ free_clock:
return clk;
}
+struct rpc_clock {
+ struct clk_divider div;
+ struct clk_gate gate;
+ /*
+ * One notifier covers both RPC and RPCD2 clocks as they are both
+ * controlled by the same RPCCKCR register...
+ */
+ struct cpg_simple_notifier csn;
+};
+
+static const struct clk_div_table cpg_rpcsrc_div_table[] = {
+ { 2, 5 }, { 3, 6 }, { 0, 0 },
+};
+
+static const struct clk_div_table cpg_rpc_div_table[] = {
+ { 1, 2 }, { 3, 4 }, { 5, 6 }, { 7, 8 }, { 0, 0 },
+};
+
+static struct clk * __init cpg_rpc_clk_register(const char *name,
+ void __iomem *base, const char *parent_name,
+ struct raw_notifier_head *notifiers)
+{
+ struct rpc_clock *rpc;
+ struct clk *clk;
+
+ rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
+ if (!rpc)
+ return ERR_PTR(-ENOMEM);
+
+ rpc->div.reg = base + CPG_RPCCKCR;
+ rpc->div.width = 3;
+ rpc->div.table = cpg_rpc_div_table;
+ rpc->div.lock = &cpg_lock;
+
+ rpc->gate.reg = base + CPG_RPCCKCR;
+ rpc->gate.bit_idx = 8;
+ rpc->gate.flags = CLK_GATE_SET_TO_DISABLE;
+ rpc->gate.lock = &cpg_lock;
+
+ rpc->csn.reg = base + CPG_RPCCKCR;
+
+ clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
+ &rpc->div.hw, &clk_divider_ops,
+ &rpc->gate.hw, &clk_gate_ops, 0);
+ if (IS_ERR(clk)) {
+ kfree(rpc);
+ return clk;
+ }
+
+ cpg_simple_notifier_register(notifiers, &rpc->csn);
+ return clk;
+}
+
+struct rpcd2_clock {
+ struct clk_fixed_factor fixed;
+ struct clk_gate gate;
+};
+
+static struct clk * __init cpg_rpcd2_clk_register(const char *name,
+ void __iomem *base,
+ const char *parent_name)
+{
+ struct rpcd2_clock *rpcd2;
+ struct clk *clk;
+
+ rpcd2 = kzalloc(sizeof(*rpcd2), GFP_KERNEL);
+ if (!rpcd2)
+ return ERR_PTR(-ENOMEM);
+
+ rpcd2->fixed.mult = 1;
+ rpcd2->fixed.div = 2;
+
+ rpcd2->gate.reg = base + CPG_RPCCKCR;
+ rpcd2->gate.bit_idx = 9;
+ rpcd2->gate.flags = CLK_GATE_SET_TO_DISABLE;
+ rpcd2->gate.lock = &cpg_lock;
+
+ clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
+ &rpcd2->fixed.hw, &clk_fixed_factor_ops,
+ &rpcd2->gate.hw, &clk_gate_ops, 0);
+ if (IS_ERR(clk))
+ kfree(rpcd2);
+
+ return clk;
+}
+
static const struct rcar_gen3_cpg_pll_config *cpg_pll_config __initdata;
static unsigned int cpg_clk_extalr __initdata;
@@ -593,6 +686,21 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
}
break;
+ case CLK_TYPE_GEN3_RPCSRC:
+ return clk_register_divider_table(NULL, core->name,
+ __clk_get_name(parent), 0,
+ base + CPG_RPCCKCR, 3, 2, 0,
+ cpg_rpcsrc_div_table,
+ &cpg_lock);
+
+ case CLK_TYPE_GEN3_RPC:
+ return cpg_rpc_clk_register(core->name, base,
+ __clk_get_name(parent), notifiers);
+
+ case CLK_TYPE_GEN3_RPCD2:
+ return cpg_rpcd2_clk_register(core->name, base,
+ __clk_get_name(parent));
+
default:
return ERR_PTR(-EINVAL);
}
@@ -613,5 +721,8 @@ int __init rcar_gen3_cpg_init(const struct rcar_gen3_cpg_pll_config *config,
if (attr)
cpg_quirks = (uintptr_t)attr->data;
pr_debug("%s: mode = 0x%x quirks = 0x%x\n", __func__, mode, cpg_quirks);
+
+ spin_lock_init(&cpg_lock);
+
return 0;
}
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h
index f4fb6cf16688..eac1b057455a 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.h
+++ b/drivers/clk/renesas/rcar-gen3-cpg.h
@@ -23,6 +23,9 @@ enum rcar_gen3_clk_types {
CLK_TYPE_GEN3_Z2,
CLK_TYPE_GEN3_OSC, /* OSC EXTAL predivider and fixed divider */
CLK_TYPE_GEN3_RCKSEL, /* Select parent/divider using RCKCR.CKSEL */
+ CLK_TYPE_GEN3_RPCSRC,
+ CLK_TYPE_GEN3_RPC,
+ CLK_TYPE_GEN3_RPCD2,
/* SoC specific definitions start here */
CLK_TYPE_GEN3_SOC_BASE,
@@ -57,6 +60,7 @@ struct rcar_gen3_cpg_pll_config {
u8 osc_prediv;
};
+#define CPG_RPCCKCR 0x238
#define CPG_RCKCR 0x240
struct clk *rcar_gen3_cpg_clk_register(struct device *dev,
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index 7ea20341e870..5ecf28854876 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -586,12 +586,12 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
COMPOSITE(0, "dclk_lcdc0_src", mux_pll_src_cpll_gpll_p, 0,
RK2928_CLKSEL_CON(27), 0, 1, MFLAGS, 8, 8, DFLAGS,
RK2928_CLKGATE_CON(3), 1, GFLAGS),
- MUX(DCLK_LCDC0, "dclk_lcdc0", mux_rk3066_lcdc0_p, 0,
+ MUX(DCLK_LCDC0, "dclk_lcdc0", mux_rk3066_lcdc0_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(27), 4, 1, MFLAGS),
COMPOSITE(0, "dclk_lcdc1_src", mux_pll_src_cpll_gpll_p, 0,
RK2928_CLKSEL_CON(28), 0, 1, MFLAGS, 8, 8, DFLAGS,
RK2928_CLKGATE_CON(3), 2, GFLAGS),
- MUX(DCLK_LCDC1, "dclk_lcdc1", mux_rk3066_lcdc1_p, 0,
+ MUX(DCLK_LCDC1, "dclk_lcdc1", mux_rk3066_lcdc1_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(28), 4, 1, MFLAGS),
COMPOSITE_NOMUX(0, "cif1_pre", "cif_src", 0,
diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
index faa94adb2a37..65ab5c2f48b0 100644
--- a/drivers/clk/rockchip/clk-rk3328.c
+++ b/drivers/clk/rockchip/clk-rk3328.c
@@ -78,17 +78,17 @@ static struct rockchip_pll_rate_table rk3328_pll_rates[] = {
static struct rockchip_pll_rate_table rk3328_pll_frac_rates[] = {
/* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */
- RK3036_PLL_RATE(1016064000, 3, 127, 1, 1, 0, 134217),
+ RK3036_PLL_RATE(1016064000, 3, 127, 1, 1, 0, 134218),
/* vco = 1016064000 */
- RK3036_PLL_RATE(983040000, 24, 983, 1, 1, 0, 671088),
+ RK3036_PLL_RATE(983040000, 24, 983, 1, 1, 0, 671089),
/* vco = 983040000 */
- RK3036_PLL_RATE(491520000, 24, 983, 2, 1, 0, 671088),
+ RK3036_PLL_RATE(491520000, 24, 983, 2, 1, 0, 671089),
/* vco = 983040000 */
- RK3036_PLL_RATE(61440000, 6, 215, 7, 2, 0, 671088),
+ RK3036_PLL_RATE(61440000, 6, 215, 7, 2, 0, 671089),
/* vco = 860156000 */
- RK3036_PLL_RATE(56448000, 12, 451, 4, 4, 0, 9797894),
+ RK3036_PLL_RATE(56448000, 12, 451, 4, 4, 0, 9797895),
/* vco = 903168000 */
- RK3036_PLL_RATE(40960000, 12, 409, 4, 5, 0, 10066329),
+ RK3036_PLL_RATE(40960000, 12, 409, 4, 5, 0, 10066330),
/* vco = 819200000 */
{ /* sentinel */ },
};
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 59d4d46667ce..54066e6508d3 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -1028,6 +1028,7 @@ static unsigned long __init exynos4_get_xom(void)
xom = readl(chipid_base + 8);
iounmap(chipid_base);
+ of_node_put(np);
}
return xom;
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
index 93306283d764..8ae44b5db4c2 100644
--- a/drivers/clk/samsung/clk-exynos5-subcmu.c
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
@@ -136,15 +136,20 @@ static int __init exynos5_clk_register_subcmu(struct device *parent,
{
struct of_phandle_args genpdspec = { .np = pd_node };
struct platform_device *pdev;
+ int ret;
+
+ pdev = platform_device_alloc("exynos5-subcmu", PLATFORM_DEVID_AUTO);
+ if (!pdev)
+ return -ENOMEM;
- pdev = platform_device_alloc(info->pd_name, -1);
pdev->dev.parent = parent;
- pdev->driver_override = "exynos5-subcmu";
platform_set_drvdata(pdev, (void *)info);
of_genpd_add_device(&genpdspec, &pdev->dev);
- platform_device_add(pdev);
+ ret = platform_device_add(pdev);
+ if (ret)
+ platform_device_put(pdev);
- return 0;
+ return ret;
}
static int __init exynos5_clk_probe(struct platform_device *pdev)
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index 751e2c4fb65b..dae1c96de933 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -559,7 +559,7 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
/* ENABLE_ACLK_TOP */
GATE(CLK_ACLK_G3D_400, "aclk_g3d_400", "div_aclk_g3d_400",
ENABLE_ACLK_TOP, 30, CLK_IS_CRITICAL, 0),
- GATE(CLK_ACLK_IMEM_SSX_266, "aclk_imem_ssx_266",
+ GATE(CLK_ACLK_IMEM_SSSX_266, "aclk_imem_sssx_266",
"div_aclk_imem_sssx_266", ENABLE_ACLK_TOP,
29, CLK_IGNORE_UNUSED, 0),
GATE(CLK_ACLK_BUS0_400, "aclk_bus0_400", "div_aclk_bus0_400",
@@ -568,10 +568,10 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
GATE(CLK_ACLK_BUS1_400, "aclk_bus1_400", "div_aclk_bus1_400",
ENABLE_ACLK_TOP, 25,
CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
- GATE(CLK_ACLK_IMEM_200, "aclk_imem_200", "div_aclk_imem_266",
+ GATE(CLK_ACLK_IMEM_200, "aclk_imem_200", "div_aclk_imem_200",
ENABLE_ACLK_TOP, 24,
CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
- GATE(CLK_ACLK_IMEM_266, "aclk_imem_266", "div_aclk_imem_200",
+ GATE(CLK_ACLK_IMEM_266, "aclk_imem_266", "div_aclk_imem_266",
ENABLE_ACLK_TOP, 23,
CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
GATE(CLK_ACLK_PERIC_66, "aclk_peric_66", "div_aclk_peric_66_b",
@@ -5467,6 +5467,35 @@ static const struct samsung_cmu_info cam1_cmu_info __initconst = {
.clk_name = "aclk_cam1_400",
};
+/*
+ * Register offset definitions for CMU_IMEM
+ */
+#define ENABLE_ACLK_IMEM_SLIMSSS 0x080c
+#define ENABLE_PCLK_IMEM_SLIMSSS 0x0908
+
+static const unsigned long imem_clk_regs[] __initconst = {
+ ENABLE_ACLK_IMEM_SLIMSSS,
+ ENABLE_PCLK_IMEM_SLIMSSS,
+};
+
+static const struct samsung_gate_clock imem_gate_clks[] __initconst = {
+ /* ENABLE_ACLK_IMEM_SLIMSSS */
+ GATE(CLK_ACLK_SLIMSSS, "aclk_slimsss", "aclk_imem_sssx_266",
+ ENABLE_ACLK_IMEM_SLIMSSS, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_PCLK_IMEM_SLIMSSS */
+ GATE(CLK_PCLK_SLIMSSS, "pclk_slimsss", "aclk_imem_200",
+ ENABLE_PCLK_IMEM_SLIMSSS, 0, CLK_IGNORE_UNUSED, 0),
+};
+
+static const struct samsung_cmu_info imem_cmu_info __initconst = {
+ .gate_clks = imem_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(imem_gate_clks),
+ .nr_clk_ids = IMEM_NR_CLK,
+ .clk_regs = imem_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(imem_clk_regs),
+ .clk_name = "aclk_imem_200",
+};
struct exynos5433_cmu_data {
struct samsung_clk_reg_dump *clk_save;
@@ -5655,6 +5684,9 @@ static const struct of_device_id exynos5433_cmu_of_match[] = {
.compatible = "samsung,exynos5433-cmu-mscl",
.data = &mscl_cmu_info,
}, {
+ .compatible = "samsung,exynos5433-cmu-imem",
+ .data = &imem_cmu_info,
+ }, {
},
};
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
index 884067e4f1a1..f38f0e24e3b6 100644
--- a/drivers/clk/samsung/clk-s3c2443.c
+++ b/drivers/clk/samsung/clk-s3c2443.c
@@ -389,7 +389,7 @@ void __init s3c2443_common_clk_init(struct device_node *np, unsigned long xti_f,
ARRAY_SIZE(s3c2450_gates));
samsung_clk_register_alias(ctx, s3c2450_aliases,
ARRAY_SIZE(s3c2450_aliases));
- /* fall through, as s3c2450 extends the s3c2416 clocks */
+ /* fall through - as s3c2450 extends the s3c2416 clocks */
case S3C2416:
samsung_clk_register_div(ctx, s3c2416_dividers,
ARRAY_SIZE(s3c2416_dividers));
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index c3f309d7100d..9cfaca5fbcdb 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -26,7 +26,7 @@ struct samsung_clk_provider {
void __iomem *reg_base;
struct device *dev;
spinlock_t lock;
- /* clk_data must be the last entry due to variable lenght 'hws' array */
+ /* clk_data must be the last entry due to variable length 'hws' array */
struct clk_hw_onecell_data clk_data;
};
diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
index aa7a6e6a15b6..73e03328d5c5 100644
--- a/drivers/clk/socfpga/clk-gate.c
+++ b/drivers/clk/socfpga/clk-gate.c
@@ -176,8 +176,7 @@ static struct clk_ops gateclk_ops = {
.set_parent = socfpga_clk_set_parent,
};
-static void __init __socfpga_gate_init(struct device_node *node,
- const struct clk_ops *ops)
+void __init socfpga_gate_init(struct device_node *node)
{
u32 clk_gate[2];
u32 div_reg[3];
@@ -188,12 +187,17 @@ static void __init __socfpga_gate_init(struct device_node *node,
const char *clk_name = node->name;
const char *parent_name[SOCFPGA_MAX_PARENTS];
struct clk_init_data init;
+ struct clk_ops *ops;
int rc;
socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
if (WARN_ON(!socfpga_clk))
return;
+ ops = kmemdup(&gateclk_ops, sizeof(gateclk_ops), GFP_KERNEL);
+ if (WARN_ON(!ops))
+ return;
+
rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2);
if (rc)
clk_gate[0] = 0;
@@ -202,8 +206,8 @@ static void __init __socfpga_gate_init(struct device_node *node,
socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
socfpga_clk->hw.bit_idx = clk_gate[1];
- gateclk_ops.enable = clk_gate_ops.enable;
- gateclk_ops.disable = clk_gate_ops.disable;
+ ops->enable = clk_gate_ops.enable;
+ ops->disable = clk_gate_ops.disable;
}
rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
@@ -234,6 +238,11 @@ static void __init __socfpga_gate_init(struct device_node *node,
init.flags = 0;
init.num_parents = of_clk_parent_fill(node, parent_name, SOCFPGA_MAX_PARENTS);
+ if (init.num_parents < 2) {
+ ops->get_parent = NULL;
+ ops->set_parent = NULL;
+ }
+
init.parent_names = parent_name;
socfpga_clk->hw.hw.init = &init;
@@ -246,8 +255,3 @@ static void __init __socfpga_gate_init(struct device_node *node,
if (WARN_ON(rc))
return;
}
-
-void __init socfpga_gate_init(struct device_node *node)
-{
- __socfpga_gate_init(node, &gateclk_ops);
-}
diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c
index 35fabe1a32c3..269467e8e07e 100644
--- a/drivers/clk/socfpga/clk-pll-a10.c
+++ b/drivers/clk/socfpga/clk-pll-a10.c
@@ -95,6 +95,7 @@ static struct clk * __init __socfpga_pll_init(struct device_node *node,
clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
clk_mgr_a10_base_addr = of_iomap(clkmgr_np, 0);
+ of_node_put(clkmgr_np);
BUG_ON(!clk_mgr_a10_base_addr);
pll_clk->hw.reg = clk_mgr_a10_base_addr + reg;
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
index c7f463172e4b..b4b44e9b5901 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -100,6 +100,7 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
clk_mgr_base_addr = of_iomap(clkmgr_np, 0);
+ of_node_put(clkmgr_np);
BUG_ON(!clk_mgr_base_addr);
pll_clk->hw.reg = clk_mgr_base_addr + reg;
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
index a4fa2945f230..4b5f8f4e4ab8 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
@@ -144,7 +144,7 @@ static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_mipi_clk, "pll-mipi",
8, 4, /* N */
4, 2, /* K */
0, 4, /* M */
- BIT(31), /* gate */
+ BIT(31) | BIT(23) | BIT(22), /* gate */
BIT(28), /* lock */
CLK_SET_RATE_UNGATE);
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index 0400e5b1d627..1fc71baae13b 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -1293,8 +1293,8 @@ static int attr_enable_set(void *data, u64 val)
return val ? dfll_enable(td) : dfll_disable(td);
}
-DEFINE_SIMPLE_ATTRIBUTE(enable_fops, attr_enable_get, attr_enable_set,
- "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(enable_fops, attr_enable_get, attr_enable_set,
+ "%llu\n");
static int attr_lock_get(void *data, u64 *val)
{
@@ -1310,8 +1310,7 @@ static int attr_lock_set(void *data, u64 val)
return val ? dfll_lock(td) : dfll_unlock(td);
}
-DEFINE_SIMPLE_ATTRIBUTE(lock_fops, attr_lock_get, attr_lock_set,
- "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(lock_fops, attr_lock_get, attr_lock_set, "%llu\n");
static int attr_rate_get(void *data, u64 *val)
{
@@ -1328,7 +1327,7 @@ static int attr_rate_set(void *data, u64 val)
return dfll_request_rate(td, val);
}
-DEFINE_SIMPLE_ATTRIBUTE(rate_fops, attr_rate_get, attr_rate_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(rate_fops, attr_rate_get, attr_rate_set, "%llu\n");
static int attr_registers_show(struct seq_file *s, void *data)
{
@@ -1379,10 +1378,11 @@ static void dfll_debug_init(struct tegra_dfll *td)
root = debugfs_create_dir("tegra_dfll_fcpu", NULL);
td->debugfs_dir = root;
- debugfs_create_file("enable", S_IRUGO | S_IWUSR, root, td, &enable_fops);
- debugfs_create_file("lock", S_IRUGO, root, td, &lock_fops);
- debugfs_create_file("rate", S_IRUGO, root, td, &rate_fops);
- debugfs_create_file("registers", S_IRUGO, root, td, &attr_registers_fops);
+ debugfs_create_file_unsafe("enable", 0644, root, td,
+ &enable_fops);
+ debugfs_create_file_unsafe("lock", 0444, root, td, &lock_fops);
+ debugfs_create_file_unsafe("rate", 0444, root, td, &rate_fops);
+ debugfs_create_file("registers", 0444, root, td, &attr_registers_fops);
}
#else
diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
index 688e403333b9..0c210984765a 100644
--- a/drivers/clk/ti/adpll.c
+++ b/drivers/clk/ti/adpll.c
@@ -614,7 +614,7 @@ static int ti_adpll_init_clkout(struct ti_adpll_data *d,
init.name = child_name;
init.ops = ops;
- init.flags = CLK_IS_BASIC;
+ init.flags = 0;
co->hw.init = &init;
parent_names[0] = __clk_get_name(clk0);
parent_names[1] = __clk_get_name(clk1);
diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
index 222f68bc3f2a..015a657d3382 100644
--- a/drivers/clk/ti/apll.c
+++ b/drivers/clk/ti/apll.c
@@ -165,7 +165,7 @@ static void __init omap_clk_register_apll(void *user,
ad->clk_bypass = __clk_get_hw(clk);
- clk = ti_clk_register(NULL, &clk_hw->hw, node->name);
+ clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, node->name);
if (!IS_ERR(clk)) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
kfree(clk_hw->hw.init->parent_names);
@@ -402,7 +402,7 @@ static void __init of_omap2_apll_setup(struct device_node *node)
if (ret)
goto cleanup;
- clk = clk_register(NULL, &clk_hw->hw);
+ clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, node->name);
if (!IS_ERR(clk)) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
kfree(init);
diff --git a/drivers/clk/ti/autoidle.c b/drivers/clk/ti/autoidle.c
index 7bb9afbe4058..1cae226759dd 100644
--- a/drivers/clk/ti/autoidle.c
+++ b/drivers/clk/ti/autoidle.c
@@ -35,7 +35,44 @@ struct clk_ti_autoidle {
#define AUTOIDLE_LOW 0x1
static LIST_HEAD(autoidle_clks);
-static LIST_HEAD(clk_hw_omap_clocks);
+
+/*
+ * we have some non-atomic read/write
+ * operations behind it, so lets
+ * take one lock for handling autoidle
+ * of all clocks
+ */
+static DEFINE_SPINLOCK(autoidle_spinlock);
+
+static int _omap2_clk_deny_idle(struct clk_hw_omap *clk)
+{
+ if (clk->ops && clk->ops->deny_idle) {
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&autoidle_spinlock, irqflags);
+ clk->autoidle_count++;
+ if (clk->autoidle_count == 1)
+ clk->ops->deny_idle(clk);
+
+ spin_unlock_irqrestore(&autoidle_spinlock, irqflags);
+ }
+ return 0;
+}
+
+static int _omap2_clk_allow_idle(struct clk_hw_omap *clk)
+{
+ if (clk->ops && clk->ops->allow_idle) {
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&autoidle_spinlock, irqflags);
+ clk->autoidle_count--;
+ if (clk->autoidle_count == 0)
+ clk->ops->allow_idle(clk);
+
+ spin_unlock_irqrestore(&autoidle_spinlock, irqflags);
+ }
+ return 0;
+}
/**
* omap2_clk_deny_idle - disable autoidle on an OMAP clock
@@ -45,12 +82,15 @@ static LIST_HEAD(clk_hw_omap_clocks);
*/
int omap2_clk_deny_idle(struct clk *clk)
{
- struct clk_hw_omap *c;
+ struct clk_hw *hw = __clk_get_hw(clk);
- c = to_clk_hw_omap(__clk_get_hw(clk));
- if (c->ops && c->ops->deny_idle)
- c->ops->deny_idle(c);
- return 0;
+ if (omap2_clk_is_hw_omap(hw)) {
+ struct clk_hw_omap *c = to_clk_hw_omap(hw);
+
+ return _omap2_clk_deny_idle(c);
+ }
+
+ return -EINVAL;
}
/**
@@ -61,12 +101,15 @@ int omap2_clk_deny_idle(struct clk *clk)
*/
int omap2_clk_allow_idle(struct clk *clk)
{
- struct clk_hw_omap *c;
+ struct clk_hw *hw = __clk_get_hw(clk);
- c = to_clk_hw_omap(__clk_get_hw(clk));
- if (c->ops && c->ops->allow_idle)
- c->ops->allow_idle(c);
- return 0;
+ if (omap2_clk_is_hw_omap(hw)) {
+ struct clk_hw_omap *c = to_clk_hw_omap(hw);
+
+ return _omap2_clk_allow_idle(c);
+ }
+
+ return -EINVAL;
}
static void _allow_autoidle(struct clk_ti_autoidle *clk)
@@ -168,26 +211,6 @@ int __init of_ti_clk_autoidle_setup(struct device_node *node)
}
/**
- * omap2_init_clk_hw_omap_clocks - initialize an OMAP clock
- * @hw: struct clk_hw * to initialize
- *
- * Add an OMAP clock @clk to the internal list of OMAP clocks. Used
- * temporarily for autoidle handling, until this support can be
- * integrated into the common clock framework code in some way. No
- * return value.
- */
-void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw)
-{
- struct clk_hw_omap *c;
-
- if (clk_hw_get_flags(hw) & CLK_IS_BASIC)
- return;
-
- c = to_clk_hw_omap(hw);
- list_add(&c->node, &clk_hw_omap_clocks);
-}
-
-/**
* omap2_clk_enable_autoidle_all - enable autoidle on all OMAP clocks that
* support it
*
@@ -198,11 +221,11 @@ void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw)
*/
int omap2_clk_enable_autoidle_all(void)
{
- struct clk_hw_omap *c;
+ int ret;
- list_for_each_entry(c, &clk_hw_omap_clocks, node)
- if (c->ops && c->ops->allow_idle)
- c->ops->allow_idle(c);
+ ret = omap2_clk_for_each(_omap2_clk_allow_idle);
+ if (ret)
+ return ret;
_clk_generic_allow_autoidle_all();
@@ -220,11 +243,11 @@ int omap2_clk_enable_autoidle_all(void)
*/
int omap2_clk_disable_autoidle_all(void)
{
- struct clk_hw_omap *c;
+ int ret;
- list_for_each_entry(c, &clk_hw_omap_clocks, node)
- if (c->ops && c->ops->deny_idle)
- c->ops->deny_idle(c);
+ ret = omap2_clk_for_each(_omap2_clk_deny_idle);
+ if (ret)
+ return ret;
_clk_generic_deny_autoidle_all();
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index 5d7fb2eecce4..ba17cc5bd04b 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -31,6 +31,7 @@
#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__
+static LIST_HEAD(clk_hw_omap_clocks);
struct ti_clk_ll_ops *ti_clk_ll_ops;
static struct device_node *clocks_node_ptr[CLK_MAX_MEMMAPS];
@@ -191,9 +192,13 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
clkdev_add(&c->lk);
} else {
if (num_args && !has_clkctrl_data) {
- if (of_find_compatible_node(NULL, NULL,
- "ti,clkctrl")) {
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL,
+ "ti,clkctrl");
+ if (np) {
has_clkctrl_data = true;
+ of_node_put(np);
} else {
clkctrl_nodes_missing = true;
@@ -520,3 +525,74 @@ struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
return clk;
}
+
+/**
+ * ti_clk_register_omap_hw - register a clk_hw_omap to the clock framework
+ * @dev: device for this clock
+ * @hw: hardware clock handle
+ * @con: connection ID for this clock
+ *
+ * Registers a clk_hw_omap clock to the clock framework, adds a clock alias
+ * for it, and adds the clock to the list of available clk_hw_omap type clocks.
+ * Returns a handle to the registered clock if successful, or an ERR_PTR value
+ * on failure.
+ */
+struct clk *ti_clk_register_omap_hw(struct device *dev, struct clk_hw *hw,
+ const char *con)
+{
+ struct clk *clk;
+ struct clk_hw_omap *oclk;
+
+ clk = ti_clk_register(dev, hw, con);
+ if (IS_ERR(clk))
+ return clk;
+
+ oclk = to_clk_hw_omap(hw);
+
+ list_add(&oclk->node, &clk_hw_omap_clocks);
+
+ return clk;
+}
+
+/**
+ * omap2_clk_for_each - call function for each registered clk_hw_omap
+ * @fn: pointer to a callback function
+ *
+ * Call @fn for each registered clk_hw_omap, passing @hw to each
+ * function. @fn must return 0 for success or any other value for
+ * failure. If @fn returns non-zero, the iteration across clocks
+ * will stop and the non-zero return value will be passed to the
+ * caller of omap2_clk_for_each().
+ */
+int omap2_clk_for_each(int (*fn)(struct clk_hw_omap *hw))
+{
+ int ret = 0;
+ struct clk_hw_omap *hw;
+
+ list_for_each_entry(hw, &clk_hw_omap_clocks, node) {
+ ret = (*fn)(hw);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * omap2_clk_is_hw_omap - check if the provided clk_hw is an OMAP clock
+ * @hw: clk_hw to check if it is an omap clock or not
+ *
+ * Checks if the provided clk_hw is an OMAP clock or not. Returns true if
+ * it is, false otherwise.
+ */
+bool omap2_clk_is_hw_omap(struct clk_hw *hw)
+{
+ struct clk_hw_omap *oclk;
+
+ list_for_each_entry(oclk, &clk_hw_omap_clocks, node) {
+ if (&oclk->hw == hw)
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index 40630eb950fc..639f515e08f0 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -276,7 +276,7 @@ _ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider,
init.parent_names = parents;
init.num_parents = num_parents;
init.ops = ops;
- init.flags = CLK_IS_BASIC;
+ init.flags = 0;
clk = ti_clk_register(NULL, clk_hw, init.name);
if (IS_ERR_OR_NULL(clk)) {
@@ -530,7 +530,7 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
* Create default clkdm name, replace _cm from end of parent
* node name with _clkdm
*/
- provider->clkdm_name[strlen(provider->clkdm_name) - 5] = 0;
+ provider->clkdm_name[strlen(provider->clkdm_name) - 2] = 0;
} else {
provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFn", node);
if (!provider->clkdm_name) {
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index 9f312a219510..1c0fac59d809 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -203,6 +203,8 @@ typedef void (*ti_of_clk_init_cb_t)(void *, struct device_node *);
struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
const char *con);
+struct clk *ti_clk_register_omap_hw(struct device *dev, struct clk_hw *hw,
+ const char *con);
int ti_clk_add_alias(struct device *dev, struct clk *clk, const char *con);
void ti_clk_add_aliases(void);
@@ -221,7 +223,6 @@ int ti_clk_retry_init(struct device_node *node, void *user,
ti_of_clk_init_cb_t func);
int ti_clk_add_component(struct device_node *node, struct clk_hw *hw, int type);
-void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw);
int of_ti_clk_autoidle_setup(struct device_node *node);
void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks);
@@ -301,6 +302,8 @@ long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
unsigned long *parent_rate);
int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req);
+int omap2_clk_for_each(int (*fn)(struct clk_hw_omap *hw));
+bool omap2_clk_is_hw_omap(struct clk_hw *hw);
extern struct ti_clk_ll_ops *ti_clk_ll_ops;
diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c
index 07a805125e98..423a99b9f10c 100644
--- a/drivers/clk/ti/clockdomain.c
+++ b/drivers/clk/ti/clockdomain.c
@@ -143,7 +143,7 @@ static void __init of_ti_clockdomain_setup(struct device_node *node)
continue;
}
clk_hw = __clk_get_hw(clk);
- if (clk_hw_get_flags(clk_hw) & CLK_IS_BASIC) {
+ if (!omap2_clk_is_hw_omap(clk_hw)) {
pr_warn("can't setup clkdm for basic clk %s\n",
__clk_get_name(clk));
continue;
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index 0241450f3eb3..4786e0ebc2e8 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -336,7 +336,7 @@ static struct clk *_register_divider(struct device *dev, const char *name,
init.name = name;
init.ops = &ti_clk_divider_ops;
- init.flags = flags | CLK_IS_BASIC;
+ init.flags = flags;
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 6c3329bc116f..659dadb23279 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -192,10 +192,9 @@ static void __init _register_dpll(void *user,
dd->clk_bypass = __clk_get_hw(clk);
/* register the clock */
- clk = ti_clk_register(NULL, &clk_hw->hw, node->name);
+ clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, node->name);
if (!IS_ERR(clk)) {
- omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
of_clk_add_provider(node, of_clk_src_simple_get, clk);
kfree(clk_hw->hw.init->parent_names);
kfree(clk_hw->hw.init);
@@ -265,14 +264,12 @@ static void _register_dpll_x2(struct device_node *node,
#endif
/* register the clock */
- clk = ti_clk_register(NULL, &clk_hw->hw, name);
+ clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
- if (IS_ERR(clk)) {
+ if (IS_ERR(clk))
kfree(clk_hw);
- } else {
- omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
+ else
of_clk_add_provider(node, of_clk_src_simple_get, clk);
- }
}
#endif
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
index 44b6b6403753..3dde6c8c3354 100644
--- a/drivers/clk/ti/dpll3xxx.c
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -731,7 +731,7 @@ static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
do {
do {
hw = clk_hw_get_parent(hw);
- } while (hw && (clk_hw_get_flags(hw) & CLK_IS_BASIC));
+ } while (hw && (!omap2_clk_is_hw_omap(hw)));
if (!hw)
break;
pclk = to_clk_hw_omap(hw);
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
index 1c78fff5513c..504c0e91cdc7 100644
--- a/drivers/clk/ti/gate.c
+++ b/drivers/clk/ti/gate.c
@@ -123,7 +123,7 @@ static struct clk *_register_gate(struct device *dev, const char *name,
init.flags = flags;
- clk = ti_clk_register(NULL, &clk_hw->hw, name);
+ clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
if (IS_ERR(clk))
kfree(clk_hw);
diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c
index 87e00c2ee957..83e34429d3b1 100644
--- a/drivers/clk/ti/interface.c
+++ b/drivers/clk/ti/interface.c
@@ -57,12 +57,10 @@ static struct clk *_register_interface(struct device *dev, const char *name,
init.num_parents = 1;
init.parent_names = &parent_name;
- clk = ti_clk_register(NULL, &clk_hw->hw, name);
+ clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
if (IS_ERR(clk))
kfree(clk_hw);
- else
- omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
return clk;
}
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 883bdde94d04..b7f9a4f068bf 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -143,7 +143,7 @@ static struct clk *_register_mux(struct device *dev, const char *name,
init.name = name;
init.ops = &ti_clk_mux_ops;
- init.flags = flags | CLK_IS_BASIC;
+ init.flags = flags;
init.parent_names = parent_names;
init.num_parents = num_parents;
diff --git a/drivers/clk/uniphier/clk-uniphier-cpugear.c b/drivers/clk/uniphier/clk-uniphier-cpugear.c
index ec11f55594ad..5d2d42b7e182 100644
--- a/drivers/clk/uniphier/clk-uniphier-cpugear.c
+++ b/drivers/clk/uniphier/clk-uniphier-cpugear.c
@@ -47,7 +47,7 @@ static int uniphier_clk_cpugear_set_parent(struct clk_hw *hw, u8 index)
return ret;
ret = regmap_write_bits(gear->regmap,
- gear->regbase + UNIPHIER_CLK_CPUGEAR_SET,
+ gear->regbase + UNIPHIER_CLK_CPUGEAR_UPD,
UNIPHIER_CLK_CPUGEAR_UPD_BIT,
UNIPHIER_CLK_CPUGEAR_UPD_BIT);
if (ret)
diff --git a/drivers/clk/x86/clk-lpt.c b/drivers/clk/x86/clk-lpt.c
index 6b40eb89ae19..68bd3abaef2c 100644
--- a/drivers/clk/x86/clk-lpt.c
+++ b/drivers/clk/x86/clk-lpt.c
@@ -13,7 +13,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/module.h>
-#include <linux/platform_data/clk-lpss.h>
+#include <linux/platform_data/x86/clk-lpss.h>
#include <linux/platform_device.h>
static int lpt_clk_probe(struct platform_device *pdev)
diff --git a/drivers/clk/x86/clk-st.c b/drivers/clk/x86/clk-st.c
index 3a0996f2d556..25d4b97aff9b 100644
--- a/drivers/clk/x86/clk-st.c
+++ b/drivers/clk/x86/clk-st.c
@@ -52,7 +52,8 @@ static int st_clk_probe(struct platform_device *pdev)
0, st_data->base + MISCCLKCNTL1, OSCCLKENB,
CLK_GATE_SET_TO_DISABLE, NULL);
- clk_hw_register_clkdev(hws[ST_CLK_GATE], "oscout1", NULL);
+ devm_clk_hw_register_clkdev(&pdev->dev, hws[ST_CLK_GATE], "oscout1",
+ NULL);
return 0;
}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 0e626b00053b..e10922709d13 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -206,17 +206,15 @@ unsigned int cpufreq_generic_get(unsigned int cpu)
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
/**
- * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
+ * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
+ * @cpu: CPU to find the policy for.
*
- * @cpu: cpu to find policy for.
+ * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
+ * the kobject reference counter of that policy. Return a valid policy on
+ * success or NULL on failure.
*
- * This returns policy for 'cpu', returns NULL if it doesn't exist.
- * It also increments the kobject reference count to mark it busy and so would
- * require a corresponding call to cpufreq_cpu_put() to decrement it back.
- * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
- * freed as that depends on the kobj count.
- *
- * Return: A valid policy on success, otherwise NULL on failure.
+ * The policy returned by this function has to be released with the help of
+ * cpufreq_cpu_put() to balance its kobject reference counter properly.
*/
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
@@ -243,12 +241,8 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
/**
- * cpufreq_cpu_put: Decrements the usage count of a policy
- *
- * @policy: policy earlier returned by cpufreq_cpu_get().
- *
- * This decrements the kobject reference count incremented earlier by calling
- * cpufreq_cpu_get().
+ * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
+ * @policy: cpufreq policy returned by cpufreq_cpu_get().
*/
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 002f5169d4eb..e22f0dbaebb1 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1762,7 +1762,7 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
/* Start over if the CPU may have been idle. */
if (delta_ns > TICK_NSEC) {
cpu->iowait_boost = ONE_EIGHTH_FP;
- } else if (cpu->iowait_boost) {
+ } else if (cpu->iowait_boost >= ONE_EIGHTH_FP) {
cpu->iowait_boost <<= 1;
if (cpu->iowait_boost > int_tofp(1))
cpu->iowait_boost = int_tofp(1);
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index 46254e583982..74e0e0c20c46 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -143,7 +143,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
return ret;
}
-static void __init pxa_cpufreq_init_voltages(void)
+static void pxa_cpufreq_init_voltages(void)
{
vcc_core = regulator_get(NULL, "vcc_core");
if (IS_ERR(vcc_core)) {
@@ -159,7 +159,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
return 0;
}
-static void __init pxa_cpufreq_init_voltages(void) { }
+static void pxa_cpufreq_init_voltages(void) { }
#endif
static void find_freq_tables(struct cpufreq_frequency_table **freq_table,
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index bb93e5cf6a4a..9fddf828a76f 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -89,6 +89,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
mutex_lock(&cpuidle_lock);
if (__cpuidle_find_governor(gov->name) == NULL) {
ret = 0;
+ list_add_tail(&gov->governor_list, &cpuidle_governors);
if (!cpuidle_curr_governor ||
!strncasecmp(param_governor, gov->name, CPUIDLE_NAME_LEN) ||
(cpuidle_curr_governor->rating < gov->rating &&
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 61316fc51548..5951604e7d5c 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -186,7 +186,7 @@ static unsigned int get_typical_interval(struct menu_device *data,
unsigned int min, max, thresh, avg;
uint64_t sum, variance;
- thresh = UINT_MAX; /* Discard outliers above this value */
+ thresh = INT_MAX; /* Discard outliers above this value */
again:
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 9eac5099098e..579578498deb 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -3455,7 +3455,6 @@ static int __init caam_algapi_init(void)
{
struct device_node *dev_node;
struct platform_device *pdev;
- struct device *ctrldev;
struct caam_drv_private *priv;
int i = 0, err = 0;
u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
@@ -3476,16 +3475,17 @@ static int __init caam_algapi_init(void)
return -ENODEV;
}
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
+ priv = dev_get_drvdata(&pdev->dev);
of_node_put(dev_node);
/*
* If priv is NULL, it's probably because the caam driver wasn't
* properly initialized (e.g. RNG4 init failed). Thus, bail out here.
*/
- if (!priv)
- return -ENODEV;
+ if (!priv) {
+ err = -ENODEV;
+ goto out_put_dev;
+ }
/*
@@ -3626,6 +3626,8 @@ static int __init caam_algapi_init(void)
if (registered)
pr_info("caam algorithms registered in /proc/crypto\n");
+out_put_dev:
+ put_device(&pdev->dev);
return err;
}
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index a15ce9213310..c61921d32489 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -2492,12 +2492,15 @@ static int __init caam_qi_algapi_init(void)
* If priv is NULL, it's probably because the caam driver wasn't
* properly initialized (e.g. RNG4 init failed). Thus, bail out here.
*/
- if (!priv || !priv->qi_present)
- return -ENODEV;
+ if (!priv || !priv->qi_present) {
+ err = -ENODEV;
+ goto out_put_dev;
+ }
if (caam_dpaa2) {
dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
- return -ENODEV;
+ err = -ENODEV;
+ goto out_put_dev;
}
/*
@@ -2610,6 +2613,8 @@ static int __init caam_qi_algapi_init(void)
if (registered)
dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
+out_put_dev:
+ put_device(ctrldev);
return err;
}
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index d7483e4d0ce2..b1eadc6652b5 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1993,7 +1993,6 @@ static int __init caam_algapi_hash_init(void)
{
struct device_node *dev_node;
struct platform_device *pdev;
- struct device *ctrldev;
int i = 0, err = 0;
struct caam_drv_private *priv;
unsigned int md_limit = SHA512_DIGEST_SIZE;
@@ -2012,16 +2011,17 @@ static int __init caam_algapi_hash_init(void)
return -ENODEV;
}
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
+ priv = dev_get_drvdata(&pdev->dev);
of_node_put(dev_node);
/*
* If priv is NULL, it's probably because the caam driver wasn't
* properly initialized (e.g. RNG4 init failed). Thus, bail out here.
*/
- if (!priv)
- return -ENODEV;
+ if (!priv) {
+ err = -ENODEV;
+ goto out_put_dev;
+ }
/*
* Register crypto algorithms the device supports. First, identify
@@ -2043,8 +2043,10 @@ static int __init caam_algapi_hash_init(void)
* Skip registration of any hashing algorithms if MD block
* is not present.
*/
- if (!md_inst)
- return -ENODEV;
+ if (!md_inst) {
+ err = -ENODEV;
+ goto out_put_dev;
+ }
/* Limit digest size based on LP256 */
if (md_vid == CHA_VER_VID_MD_LP256)
@@ -2101,6 +2103,8 @@ static int __init caam_algapi_hash_init(void)
list_add_tail(&t_alg->entry, &hash_list);
}
+out_put_dev:
+ put_device(&pdev->dev);
return err;
}
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 77ab28a2811a..58285642306e 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -1042,8 +1042,10 @@ static int __init caam_pkc_init(void)
* If priv is NULL, it's probably because the caam driver wasn't
* properly initialized (e.g. RNG4 init failed). Thus, bail out here.
*/
- if (!priv)
- return -ENODEV;
+ if (!priv) {
+ err = -ENODEV;
+ goto out_put_dev;
+ }
/* Determine public key hardware accelerator presence. */
if (priv->era < 10)
@@ -1053,8 +1055,10 @@ static int __init caam_pkc_init(void)
pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;
/* Do not register algorithms if PKHA is not present. */
- if (!pk_inst)
- return -ENODEV;
+ if (!pk_inst) {
+ err = -ENODEV;
+ goto out_put_dev;
+ }
err = crypto_register_akcipher(&caam_rsa);
if (err)
@@ -1063,6 +1067,8 @@ static int __init caam_pkc_init(void)
else
dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
+out_put_dev:
+ put_device(ctrldev);
return err;
}
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index a387c8d49a62..95eb5402c59f 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -308,7 +308,6 @@ static int __init caam_rng_init(void)
struct device *dev;
struct device_node *dev_node;
struct platform_device *pdev;
- struct device *ctrldev;
struct caam_drv_private *priv;
u32 rng_inst;
int err;
@@ -326,16 +325,17 @@ static int __init caam_rng_init(void)
return -ENODEV;
}
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
+ priv = dev_get_drvdata(&pdev->dev);
of_node_put(dev_node);
/*
* If priv is NULL, it's probably because the caam driver wasn't
* properly initialized (e.g. RNG4 init failed). Thus, bail out here.
*/
- if (!priv)
- return -ENODEV;
+ if (!priv) {
+ err = -ENODEV;
+ goto out_put_dev;
+ }
/* Check for an instantiated RNG before registration */
if (priv->era < 10)
@@ -344,13 +344,16 @@ static int __init caam_rng_init(void)
else
rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
- if (!rng_inst)
- return -ENODEV;
+ if (!rng_inst) {
+ err = -ENODEV;
+ goto out_put_dev;
+ }
dev = caam_jr_alloc();
if (IS_ERR(dev)) {
pr_err("Job Ring Device allocation for transform failed\n");
- return PTR_ERR(dev);
+ err = PTR_ERR(dev);
+ goto out_put_dev;
}
rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
if (!rng_ctx) {
@@ -361,6 +364,7 @@ static int __init caam_rng_init(void)
if (err)
goto free_rng_ctx;
+ put_device(&pdev->dev);
dev_info(dev, "registering rng-caam\n");
return hwrng_register(&caam_rng);
@@ -368,6 +372,8 @@ free_rng_ctx:
kfree(rng_ctx);
free_caam_alloc:
caam_jr_free(dev);
+out_put_dev:
+ put_device(&pdev->dev);
return err;
}
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index f4e625cf53ca..1afdcb81d8ed 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -241,7 +241,7 @@
struct samsung_aes_variant {
unsigned int aes_offset;
unsigned int hash_offset;
- const char *clk_names[];
+ const char *clk_names[2];
};
struct s5p_aes_reqctx {
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 6e928f37d084..0cb8c30ea278 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -86,12 +86,14 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
{
struct dax_device *dax_dev;
bool dax_enabled = false;
+ pgoff_t pgoff, pgoff_end;
struct request_queue *q;
- pgoff_t pgoff;
- int err, id;
- pfn_t pfn;
- long len;
char buf[BDEVNAME_SIZE];
+ void *kaddr, *end_kaddr;
+ pfn_t pfn, end_pfn;
+ sector_t last_page;
+ long len, len2;
+ int err, id;
if (blocksize != PAGE_SIZE) {
pr_debug("%s: error: unsupported blocksize for dax\n",
@@ -113,6 +115,14 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
return false;
}
+ last_page = PFN_DOWN(i_size_read(bdev->bd_inode) - 1) * 8;
+ err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
+ if (err) {
+ pr_debug("%s: error: unaligned partition for dax\n",
+ bdevname(bdev, buf));
+ return false;
+ }
+
dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
if (!dax_dev) {
pr_debug("%s: error: device does not support dax\n",
@@ -121,14 +131,15 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
}
id = dax_read_lock();
- len = dax_direct_access(dax_dev, pgoff, 1, NULL, &pfn);
+ len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
+ len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
dax_read_unlock(id);
put_dax(dax_dev);
- if (len < 1) {
+ if (len < 1 || len2 < 1) {
pr_debug("%s: error: dax access failed (%ld)\n",
- bdevname(bdev, buf), len);
+ bdevname(bdev, buf), len < 1 ? len : len2);
return false;
}
@@ -143,13 +154,20 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
*/
WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
dax_enabled = true;
- } else if (pfn_t_devmap(pfn)) {
- struct dev_pagemap *pgmap;
+ } else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
+ struct dev_pagemap *pgmap, *end_pgmap;
pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
- if (pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX)
+ end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
+ if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
+ && pfn_t_to_page(pfn)->pgmap == pgmap
+ && pfn_t_to_page(end_pfn)->pgmap == pgmap
+ && pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
+ && pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
dax_enabled = true;
put_dev_pagemap(pgmap);
+ put_dev_pagemap(end_pgmap);
+
}
if (!dax_enabled) {
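The __bdev_dax_supported() change above now probes both the first and the last page of the block device and reports DAX support only when both resolve to the same MEMORY_DEVICE_FS_DAX pagemap and their pfns match the kernel addresses returned by dax_direct_access(). The last-page computation assumes 512-byte sectors, so with 4 KiB pages there are PAGE_SIZE / 512 = 8 sectors per page; a commented restatement of the added lines:

	/* last byte of the block device */
	loff_t last_byte = i_size_read(bdev->bd_inode) - 1;

	/* first 512-byte sector of the page containing that byte */
	sector_t last_page = PFN_DOWN(last_byte) * 8;

	/* translate the sector into a page offset within the DAX device;
	 * this fails if the partition start is not page-aligned */
	err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);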
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d2286c7f7222..0b1dfb5bf2d9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -218,6 +218,20 @@ config FSL_EDMA
multiplexing capability for DMA request sources(slot).
This module can be found on Freescale Vybrid and LS-1 SoCs.
+config FSL_QDMA
+ tristate "NXP Layerscape qDMA engine support"
+ depends on ARM || ARM64
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ select DMA_ENGINE_RAID
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ help
+ Support the NXP Layerscape qDMA engine with command queue and legacy mode.
+ Channel virtualization is supported through enqueuing of DMA jobs to,
+ or dequeuing DMA jobs from, different work queues.
+ This module can be found on NXP Layerscape SoCs.
+	  The qdma driver only works on SoCs with a DPAA hardware block.
+
config FSL_RAID
tristate "Freescale RAID engine Support"
depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 09571a81353d..6126e1c3a875 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o
+obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
obj-$(CONFIG_FSL_RAID) += fsl_raid.o
obj-$(CONFIG_HSU_DMA) += hsu/
obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 01d936c9fe89..a0a9cd76c1d4 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -134,7 +134,6 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
struct at_desc *ret = NULL;
unsigned long flags;
unsigned int i = 0;
- LIST_HEAD(tmp_list);
spin_lock_irqsave(&atchan->lock, flags);
list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
@@ -1387,8 +1386,6 @@ static int atc_pause(struct dma_chan *chan)
int chan_id = atchan->chan_common.chan_id;
unsigned long flags;
- LIST_HEAD(list);
-
dev_vdbg(chan2dev(chan), "%s\n", __func__);
spin_lock_irqsave(&atchan->lock, flags);
@@ -1408,8 +1405,6 @@ static int atc_resume(struct dma_chan *chan)
int chan_id = atchan->chan_common.chan_id;
unsigned long flags;
- LIST_HEAD(list);
-
dev_vdbg(chan2dev(chan), "%s\n", __func__);
if (!atc_chan_is_paused(atchan))
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index ae10f5614f95..ec8a291d62ba 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -2,9 +2,6 @@
/*
* BCM2835 DMA engine support
*
- * This driver only supports cyclic DMA transfers
- * as needed for the I2S module.
- *
* Author: Florian Meier <florian.meier@koalo.de>
* Copyright 2013
*
@@ -42,7 +39,6 @@
struct bcm2835_dmadev {
struct dma_device ddev;
- spinlock_t lock;
void __iomem *base;
struct device_dma_parameters dma_parms;
};
@@ -64,7 +60,6 @@ struct bcm2835_cb_entry {
struct bcm2835_chan {
struct virt_dma_chan vc;
- struct list_head node;
struct dma_slave_config cfg;
unsigned int dreq;
@@ -312,8 +307,7 @@ static struct bcm2835_desc *bcm2835_dma_create_cb_chain(
return NULL;
/* allocate and setup the descriptor. */
- d = kzalloc(sizeof(*d) + frames * sizeof(struct bcm2835_cb_entry),
- gfp);
+ d = kzalloc(struct_size(d, cb_list, frames), gfp);
if (!d)
return NULL;
@@ -406,7 +400,7 @@ static void bcm2835_dma_fill_cb_chain_with_sg(
}
}
-static int bcm2835_dma_abort(struct bcm2835_chan *c)
+static void bcm2835_dma_abort(struct bcm2835_chan *c)
{
void __iomem *chan_base = c->chan_base;
long int timeout = 10000;
@@ -416,7 +410,7 @@ static int bcm2835_dma_abort(struct bcm2835_chan *c)
* (The ACTIVE flag in the CS register is not a reliable indicator.)
*/
if (!readl(chan_base + BCM2835_DMA_ADDR))
- return 0;
+ return;
/* Write 0 to the active bit - Pause the DMA */
writel(0, chan_base + BCM2835_DMA_CS);
@@ -432,7 +426,6 @@ static int bcm2835_dma_abort(struct bcm2835_chan *c)
"failed to complete outstanding writes\n");
writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
- return 0;
}
static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
@@ -504,8 +497,12 @@ static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
dev_dbg(dev, "Allocating DMA channel %d\n", c->ch);
+ /*
+	 * Control blocks are 256 bits in length and must start at a 256-bit
+	 * (32-byte) aligned address (BCM2835 ARM Peripherals, sec. 4.2.1.1).
+ */
c->cb_pool = dma_pool_create(dev_name(dev), dev,
- sizeof(struct bcm2835_dma_cb), 0, 0);
+ sizeof(struct bcm2835_dma_cb), 32, 0);
if (!c->cb_pool) {
dev_err(dev, "unable to allocate descriptor pool\n");
return -ENOMEM;
@@ -774,17 +771,11 @@ static int bcm2835_dma_slave_config(struct dma_chan *chan,
static int bcm2835_dma_terminate_all(struct dma_chan *chan)
{
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
- struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
unsigned long flags;
LIST_HEAD(head);
spin_lock_irqsave(&c->vc.lock, flags);
- /* Prevent this channel being scheduled */
- spin_lock(&d->lock);
- list_del_init(&c->node);
- spin_unlock(&d->lock);
-
/* stop DMA activity */
if (c->desc) {
vchan_terminate_vdesc(&c->desc->vd);
@@ -817,7 +808,6 @@ static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id,
c->vc.desc_free = bcm2835_dma_desc_free;
vchan_init(&c->vc, &d->ddev);
- INIT_LIST_HEAD(&c->node);
c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
c->ch = chan_id;
@@ -920,7 +910,6 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
od->ddev.dev = &pdev->dev;
INIT_LIST_HEAD(&od->ddev.channels);
- spin_lock_init(&od->lock);
platform_set_drvdata(pdev, od);
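Besides dropping the unused per-device lock and channel list, the bcm2835 hunk above creates the control-block pool with an explicit 32-byte alignment, since the controller requires control blocks to start on a 256-bit boundary. For reference, the dma_pool_create() arguments line up as in this annotated restatement of the call (the comment labels are mine):

	c->cb_pool = dma_pool_create(dev_name(dev), dev,
				     sizeof(struct bcm2835_dma_cb),	/* block size */
				     32,				/* minimum alignment */
				     0);				/* no boundary-crossing limit */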
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 15b2453d2647..ffc0adc2f6ce 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -367,8 +367,7 @@ static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
struct axi_dmac_desc *desc;
unsigned int i;
- desc = kzalloc(sizeof(struct axi_dmac_desc) +
- sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT);
+ desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
if (!desc)
return NULL;
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index a8b6225faa12..9ce0a386225b 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -838,9 +838,8 @@ static int jz4780_dma_probe(struct platform_device *pdev)
if (!soc_data)
return -EINVAL;
- jzdma = devm_kzalloc(dev, sizeof(*jzdma)
- + sizeof(*jzdma->chan) * soc_data->nb_channels,
- GFP_KERNEL);
+ jzdma = devm_kzalloc(dev, struct_size(jzdma, chan,
+ soc_data->nb_channels), GFP_KERNEL);
if (!jzdma)
return -ENOMEM;
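The bcm2835, axi-dmac and jz4780 hunks above replace open-coded sizeof(*p) + n * sizeof(elem) allocations with struct_size(), which sizes a structure with a trailing flexible array and saturates to SIZE_MAX on overflow, so the allocation fails rather than being undersized. A generic sketch, not the drivers' actual structure layouts:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_sg {
	u64 addr;
	u32 len;
};

struct example_desc {
	unsigned int num_sgs;
	struct example_sg sg[];			/* flexible array member */
};

static struct example_desc *example_alloc_desc(unsigned int num_sgs)
{
	struct example_desc *desc;

	/* struct_size(desc, sg, num_sgs) ==
	 *	sizeof(*desc) + num_sgs * sizeof(desc->sg[0]),
	 * with overflow saturating to SIZE_MAX */
	desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->num_sgs = num_sgs;
	return desc;
}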
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 6511928b4cdf..b96814a7dceb 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -200,15 +200,20 @@ struct dmatest_done {
wait_queue_head_t *wait;
};
+struct dmatest_data {
+ u8 **raw;
+ u8 **aligned;
+ unsigned int cnt;
+ unsigned int off;
+};
+
struct dmatest_thread {
struct list_head node;
struct dmatest_info *info;
struct task_struct *task;
struct dma_chan *chan;
- u8 **srcs;
- u8 **usrcs;
- u8 **dsts;
- u8 **udsts;
+ struct dmatest_data src;
+ struct dmatest_data dst;
enum dma_transaction_type type;
wait_queue_head_t done_wait;
struct dmatest_done test_done;
@@ -481,6 +486,53 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
return FIXPT_TO_INT(dmatest_persec(runtime, len >> 10));
}
+static void __dmatest_free_test_data(struct dmatest_data *d, unsigned int cnt)
+{
+ unsigned int i;
+
+ for (i = 0; i < cnt; i++)
+ kfree(d->raw[i]);
+
+ kfree(d->aligned);
+ kfree(d->raw);
+}
+
+static void dmatest_free_test_data(struct dmatest_data *d)
+{
+ __dmatest_free_test_data(d, d->cnt);
+}
+
+static int dmatest_alloc_test_data(struct dmatest_data *d,
+ unsigned int buf_size, u8 align)
+{
+ unsigned int i = 0;
+
+ d->raw = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
+ if (!d->raw)
+ return -ENOMEM;
+
+ d->aligned = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
+ if (!d->aligned)
+ goto err;
+
+ for (i = 0; i < d->cnt; i++) {
+ d->raw[i] = kmalloc(buf_size + align, GFP_KERNEL);
+ if (!d->raw[i])
+ goto err;
+
+ /* align to alignment restriction */
+ if (align)
+ d->aligned[i] = PTR_ALIGN(d->raw[i], align);
+ else
+ d->aligned[i] = d->raw[i];
+ }
+
+ return 0;
+err:
+ __dmatest_free_test_data(d, i);
+ return -ENOMEM;
+}
+
/*
* This function repeatedly tests DMA transfers of various lengths and
* offsets for a given operation type until it is told to exit by
@@ -511,8 +563,9 @@ static int dmatest_func(void *data)
enum dma_ctrl_flags flags;
u8 *pq_coefs = NULL;
int ret;
- int src_cnt;
- int dst_cnt;
+ unsigned int buf_size;
+ struct dmatest_data *src;
+ struct dmatest_data *dst;
int i;
ktime_t ktime, start, diff;
ktime_t filltime = 0;
@@ -535,25 +588,27 @@ static int dmatest_func(void *data)
params = &info->params;
chan = thread->chan;
dev = chan->device;
+ src = &thread->src;
+ dst = &thread->dst;
if (thread->type == DMA_MEMCPY) {
align = params->alignment < 0 ? dev->copy_align :
params->alignment;
- src_cnt = dst_cnt = 1;
+ src->cnt = dst->cnt = 1;
} else if (thread->type == DMA_MEMSET) {
align = params->alignment < 0 ? dev->fill_align :
params->alignment;
- src_cnt = dst_cnt = 1;
+ src->cnt = dst->cnt = 1;
is_memset = true;
} else if (thread->type == DMA_XOR) {
/* force odd to ensure dst = src */
- src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
- dst_cnt = 1;
+ src->cnt = min_odd(params->xor_sources | 1, dev->max_xor);
+ dst->cnt = 1;
align = params->alignment < 0 ? dev->xor_align :
params->alignment;
} else if (thread->type == DMA_PQ) {
/* force odd to ensure dst = src */
- src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
- dst_cnt = 2;
+ src->cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
+ dst->cnt = 2;
align = params->alignment < 0 ? dev->pq_align :
params->alignment;
@@ -561,75 +616,38 @@ static int dmatest_func(void *data)
if (!pq_coefs)
goto err_thread_type;
- for (i = 0; i < src_cnt; i++)
+ for (i = 0; i < src->cnt; i++)
pq_coefs[i] = 1;
} else
goto err_thread_type;
/* Check if buffer count fits into map count variable (u8) */
- if ((src_cnt + dst_cnt) >= 255) {
+ if ((src->cnt + dst->cnt) >= 255) {
pr_err("too many buffers (%d of 255 supported)\n",
- src_cnt + dst_cnt);
+ src->cnt + dst->cnt);
goto err_free_coefs;
}
- if (1 << align > params->buf_size) {
+ buf_size = params->buf_size;
+ if (1 << align > buf_size) {
pr_err("%u-byte buffer too small for %d-byte alignment\n",
- params->buf_size, 1 << align);
+ buf_size, 1 << align);
goto err_free_coefs;
}
- thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
- if (!thread->srcs)
+ if (dmatest_alloc_test_data(src, buf_size, align) < 0)
goto err_free_coefs;
- thread->usrcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
- if (!thread->usrcs)
- goto err_usrcs;
-
- for (i = 0; i < src_cnt; i++) {
- thread->usrcs[i] = kmalloc(params->buf_size + align,
- GFP_KERNEL);
- if (!thread->usrcs[i])
- goto err_srcbuf;
-
- /* align srcs to alignment restriction */
- if (align)
- thread->srcs[i] = PTR_ALIGN(thread->usrcs[i], align);
- else
- thread->srcs[i] = thread->usrcs[i];
- }
- thread->srcs[i] = NULL;
-
- thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
- if (!thread->dsts)
- goto err_dsts;
-
- thread->udsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
- if (!thread->udsts)
- goto err_udsts;
-
- for (i = 0; i < dst_cnt; i++) {
- thread->udsts[i] = kmalloc(params->buf_size + align,
- GFP_KERNEL);
- if (!thread->udsts[i])
- goto err_dstbuf;
-
- /* align dsts to alignment restriction */
- if (align)
- thread->dsts[i] = PTR_ALIGN(thread->udsts[i], align);
- else
- thread->dsts[i] = thread->udsts[i];
- }
- thread->dsts[i] = NULL;
+ if (dmatest_alloc_test_data(dst, buf_size, align) < 0)
+ goto err_src;
set_user_nice(current, 10);
- srcs = kcalloc(src_cnt, sizeof(dma_addr_t), GFP_KERNEL);
+ srcs = kcalloc(src->cnt, sizeof(dma_addr_t), GFP_KERNEL);
if (!srcs)
- goto err_dstbuf;
+ goto err_dst;
- dma_pq = kcalloc(dst_cnt, sizeof(dma_addr_t), GFP_KERNEL);
+ dma_pq = kcalloc(dst->cnt, sizeof(dma_addr_t), GFP_KERNEL);
if (!dma_pq)
goto err_srcs_array;
@@ -644,21 +662,21 @@ static int dmatest_func(void *data)
struct dma_async_tx_descriptor *tx = NULL;
struct dmaengine_unmap_data *um;
dma_addr_t *dsts;
- unsigned int src_off, dst_off, len;
+ unsigned int len;
total_tests++;
if (params->transfer_size) {
- if (params->transfer_size >= params->buf_size) {
+ if (params->transfer_size >= buf_size) {
pr_err("%u-byte transfer size must be lower than %u-buffer size\n",
- params->transfer_size, params->buf_size);
+ params->transfer_size, buf_size);
break;
}
len = params->transfer_size;
} else if (params->norandom) {
- len = params->buf_size;
+ len = buf_size;
} else {
- len = dmatest_random() % params->buf_size + 1;
+ len = dmatest_random() % buf_size + 1;
}
/* Do not alter transfer size explicitly defined by user */
@@ -670,57 +688,57 @@ static int dmatest_func(void *data)
total_len += len;
if (params->norandom) {
- src_off = 0;
- dst_off = 0;
+ src->off = 0;
+ dst->off = 0;
} else {
- src_off = dmatest_random() % (params->buf_size - len + 1);
- dst_off = dmatest_random() % (params->buf_size - len + 1);
+ src->off = dmatest_random() % (buf_size - len + 1);
+ dst->off = dmatest_random() % (buf_size - len + 1);
- src_off = (src_off >> align) << align;
- dst_off = (dst_off >> align) << align;
+ src->off = (src->off >> align) << align;
+ dst->off = (dst->off >> align) << align;
}
if (!params->noverify) {
start = ktime_get();
- dmatest_init_srcs(thread->srcs, src_off, len,
- params->buf_size, is_memset);
- dmatest_init_dsts(thread->dsts, dst_off, len,
- params->buf_size, is_memset);
+ dmatest_init_srcs(src->aligned, src->off, len,
+ buf_size, is_memset);
+ dmatest_init_dsts(dst->aligned, dst->off, len,
+ buf_size, is_memset);
diff = ktime_sub(ktime_get(), start);
filltime = ktime_add(filltime, diff);
}
- um = dmaengine_get_unmap_data(dev->dev, src_cnt + dst_cnt,
+ um = dmaengine_get_unmap_data(dev->dev, src->cnt + dst->cnt,
GFP_KERNEL);
if (!um) {
failed_tests++;
result("unmap data NULL", total_tests,
- src_off, dst_off, len, ret);
+ src->off, dst->off, len, ret);
continue;
}
- um->len = params->buf_size;
- for (i = 0; i < src_cnt; i++) {
- void *buf = thread->srcs[i];
+ um->len = buf_size;
+ for (i = 0; i < src->cnt; i++) {
+ void *buf = src->aligned[i];
struct page *pg = virt_to_page(buf);
unsigned long pg_off = offset_in_page(buf);
um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
um->len, DMA_TO_DEVICE);
- srcs[i] = um->addr[i] + src_off;
+ srcs[i] = um->addr[i] + src->off;
ret = dma_mapping_error(dev->dev, um->addr[i]);
if (ret) {
result("src mapping error", total_tests,
- src_off, dst_off, len, ret);
+ src->off, dst->off, len, ret);
goto error_unmap_continue;
}
um->to_cnt++;
}
/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
- dsts = &um->addr[src_cnt];
- for (i = 0; i < dst_cnt; i++) {
- void *buf = thread->dsts[i];
+ dsts = &um->addr[src->cnt];
+ for (i = 0; i < dst->cnt; i++) {
+ void *buf = dst->aligned[i];
struct page *pg = virt_to_page(buf);
unsigned long pg_off = offset_in_page(buf);
@@ -729,7 +747,7 @@ static int dmatest_func(void *data)
ret = dma_mapping_error(dev->dev, dsts[i]);
if (ret) {
result("dst mapping error", total_tests,
- src_off, dst_off, len, ret);
+ src->off, dst->off, len, ret);
goto error_unmap_continue;
}
um->bidi_cnt++;
@@ -737,29 +755,29 @@ static int dmatest_func(void *data)
if (thread->type == DMA_MEMCPY)
tx = dev->device_prep_dma_memcpy(chan,
- dsts[0] + dst_off,
+ dsts[0] + dst->off,
srcs[0], len, flags);
else if (thread->type == DMA_MEMSET)
tx = dev->device_prep_dma_memset(chan,
- dsts[0] + dst_off,
- *(thread->srcs[0] + src_off),
+ dsts[0] + dst->off,
+ *(src->aligned[0] + src->off),
len, flags);
else if (thread->type == DMA_XOR)
tx = dev->device_prep_dma_xor(chan,
- dsts[0] + dst_off,
- srcs, src_cnt,
+ dsts[0] + dst->off,
+ srcs, src->cnt,
len, flags);
else if (thread->type == DMA_PQ) {
- for (i = 0; i < dst_cnt; i++)
- dma_pq[i] = dsts[i] + dst_off;
+ for (i = 0; i < dst->cnt; i++)
+ dma_pq[i] = dsts[i] + dst->off;
tx = dev->device_prep_dma_pq(chan, dma_pq, srcs,
- src_cnt, pq_coefs,
+ src->cnt, pq_coefs,
len, flags);
}
if (!tx) {
- result("prep error", total_tests, src_off,
- dst_off, len, ret);
+ result("prep error", total_tests, src->off,
+ dst->off, len, ret);
msleep(100);
goto error_unmap_continue;
}
@@ -770,8 +788,8 @@ static int dmatest_func(void *data)
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
- result("submit error", total_tests, src_off,
- dst_off, len, ret);
+ result("submit error", total_tests, src->off,
+ dst->off, len, ret);
msleep(100);
goto error_unmap_continue;
}
@@ -783,58 +801,58 @@ static int dmatest_func(void *data)
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
if (!done->done) {
- result("test timed out", total_tests, src_off, dst_off,
+ result("test timed out", total_tests, src->off, dst->off,
len, 0);
goto error_unmap_continue;
} else if (status != DMA_COMPLETE) {
result(status == DMA_ERROR ?
"completion error status" :
- "completion busy status", total_tests, src_off,
- dst_off, len, ret);
+ "completion busy status", total_tests, src->off,
+ dst->off, len, ret);
goto error_unmap_continue;
}
dmaengine_unmap_put(um);
if (params->noverify) {
- verbose_result("test passed", total_tests, src_off,
- dst_off, len, 0);
+ verbose_result("test passed", total_tests, src->off,
+ dst->off, len, 0);
continue;
}
start = ktime_get();
pr_debug("%s: verifying source buffer...\n", current->comm);
- error_count = dmatest_verify(thread->srcs, 0, src_off,
+ error_count = dmatest_verify(src->aligned, 0, src->off,
0, PATTERN_SRC, true, is_memset);
- error_count += dmatest_verify(thread->srcs, src_off,
- src_off + len, src_off,
+ error_count += dmatest_verify(src->aligned, src->off,
+ src->off + len, src->off,
PATTERN_SRC | PATTERN_COPY, true, is_memset);
- error_count += dmatest_verify(thread->srcs, src_off + len,
- params->buf_size, src_off + len,
+ error_count += dmatest_verify(src->aligned, src->off + len,
+ buf_size, src->off + len,
PATTERN_SRC, true, is_memset);
pr_debug("%s: verifying dest buffer...\n", current->comm);
- error_count += dmatest_verify(thread->dsts, 0, dst_off,
+ error_count += dmatest_verify(dst->aligned, 0, dst->off,
0, PATTERN_DST, false, is_memset);
- error_count += dmatest_verify(thread->dsts, dst_off,
- dst_off + len, src_off,
+ error_count += dmatest_verify(dst->aligned, dst->off,
+ dst->off + len, src->off,
PATTERN_SRC | PATTERN_COPY, false, is_memset);
- error_count += dmatest_verify(thread->dsts, dst_off + len,
- params->buf_size, dst_off + len,
+ error_count += dmatest_verify(dst->aligned, dst->off + len,
+ buf_size, dst->off + len,
PATTERN_DST, false, is_memset);
diff = ktime_sub(ktime_get(), start);
comparetime = ktime_add(comparetime, diff);
if (error_count) {
- result("data error", total_tests, src_off, dst_off,
+ result("data error", total_tests, src->off, dst->off,
len, error_count);
failed_tests++;
} else {
- verbose_result("test passed", total_tests, src_off,
- dst_off, len, 0);
+ verbose_result("test passed", total_tests, src->off,
+ dst->off, len, 0);
}
continue;
@@ -852,19 +870,10 @@ error_unmap_continue:
kfree(dma_pq);
err_srcs_array:
kfree(srcs);
-err_dstbuf:
- for (i = 0; thread->udsts[i]; i++)
- kfree(thread->udsts[i]);
- kfree(thread->udsts);
-err_udsts:
- kfree(thread->dsts);
-err_dsts:
-err_srcbuf:
- for (i = 0; thread->usrcs[i]; i++)
- kfree(thread->usrcs[i]);
- kfree(thread->usrcs);
-err_usrcs:
- kfree(thread->srcs);
+err_dst:
+ dmatest_free_test_data(dst);
+err_src:
+ dmatest_free_test_data(src);
err_free_coefs:
kfree(pq_coefs);
err_thread_type:
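The dmatest refactor above folds the four per-thread buffer arrays (srcs/usrcs/dsts/udsts) into two struct dmatest_data instances, so allocation, alignment and teardown share a single helper pair. A minimal usage sketch of the new helpers (cnt, buf_size and align are placeholder values; error handling abbreviated):

	struct dmatest_data src = { .cnt = 1 };
	struct dmatest_data dst = { .cnt = 1 };

	if (dmatest_alloc_test_data(&src, buf_size, align) < 0)
		return -ENOMEM;

	if (dmatest_alloc_test_data(&dst, buf_size, align) < 0) {
		dmatest_free_test_data(&src);
		return -ENOMEM;
	}

	/* transfers use src.aligned[i] + src.off and dst.aligned[i] + dst.off,
	 * each backed by a buf_size + align raw allocation in ->raw[i] */

	dmatest_free_test_data(&dst);
	dmatest_free_test_data(&src);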
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
index f8888dc0b8dc..18b6014cf9b4 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
@@ -75,7 +75,7 @@ struct __packed axi_dma_lli {
__le32 sstat;
__le32 dstat;
__le32 status_lo;
- __le32 ststus_hi;
+ __le32 status_hi;
__le32 reserved_lo;
__le32 reserved_hi;
};
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index 04b9728c1d26..e5162690de8f 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
#
# DMA engine configuration for dw
#
diff --git a/drivers/dma/dw/Makefile b/drivers/dma/dw/Makefile
index 2b949c2e4504..63ed895c09aa 100644
--- a/drivers/dma/dw/Makefile
+++ b/drivers/dma/dw/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DW_DMAC_CORE) += dw_dmac_core.o
-dw_dmac_core-objs := core.o
+dw_dmac_core-objs := core.o dw.o idma32.o
obj-$(CONFIG_DW_DMAC) += dw_dmac.o
dw_dmac-objs := platform.o
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index dc053e62f894..21cb2a58dbd2 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Core driver for the Synopsys DesignWare DMA Controller
*
* Copyright (C) 2007-2008 Atmel Corporation
* Copyright (C) 2010-2011 ST Microelectronics
* Copyright (C) 2013 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/bitops.h>
@@ -37,27 +34,6 @@
* support descriptor writeback.
*/
-#define DWC_DEFAULT_CTLLO(_chan) ({ \
- struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \
- struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
- bool _is_slave = is_slave_direction(_dwc->direction); \
- u8 _smsize = _is_slave ? _sconfig->src_maxburst : \
- DW_DMA_MSIZE_16; \
- u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \
- DW_DMA_MSIZE_16; \
- u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ? \
- _dwc->dws.p_master : _dwc->dws.m_master; \
- u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ? \
- _dwc->dws.p_master : _dwc->dws.m_master; \
- \
- (DWC_CTLL_DST_MSIZE(_dmsize) \
- | DWC_CTLL_SRC_MSIZE(_smsize) \
- | DWC_CTLL_LLP_D_EN \
- | DWC_CTLL_LLP_S_EN \
- | DWC_CTLL_DMS(_dms) \
- | DWC_CTLL_SMS(_sms)); \
- })
-
/* The set of bus widths supported by the DMA controller */
#define DW_DMA_BUSWIDTHS \
BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
@@ -138,44 +114,6 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
dwc->descs_allocated--;
}
-static void dwc_initialize_chan_idma32(struct dw_dma_chan *dwc)
-{
- u32 cfghi = 0;
- u32 cfglo = 0;
-
- /* Set default burst alignment */
- cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN;
-
- /* Low 4 bits of the request lines */
- cfghi |= IDMA32C_CFGH_DST_PER(dwc->dws.dst_id & 0xf);
- cfghi |= IDMA32C_CFGH_SRC_PER(dwc->dws.src_id & 0xf);
-
- /* Request line extension (2 bits) */
- cfghi |= IDMA32C_CFGH_DST_PER_EXT(dwc->dws.dst_id >> 4 & 0x3);
- cfghi |= IDMA32C_CFGH_SRC_PER_EXT(dwc->dws.src_id >> 4 & 0x3);
-
- channel_writel(dwc, CFG_LO, cfglo);
- channel_writel(dwc, CFG_HI, cfghi);
-}
-
-static void dwc_initialize_chan_dw(struct dw_dma_chan *dwc)
-{
- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
- u32 cfghi = DWC_CFGH_FIFO_MODE;
- u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
- bool hs_polarity = dwc->dws.hs_polarity;
-
- cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id);
- cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id);
- cfghi |= DWC_CFGH_PROTCTL(dw->pdata->protctl);
-
- /* Set polarity of handshake interface */
- cfglo |= hs_polarity ? DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0;
-
- channel_writel(dwc, CFG_LO, cfglo);
- channel_writel(dwc, CFG_HI, cfghi);
-}
-
static void dwc_initialize(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
@@ -183,10 +121,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
return;
- if (dw->pdata->is_idma32)
- dwc_initialize_chan_idma32(dwc);
- else
- dwc_initialize_chan_dw(dwc);
+ dw->initialize_chan(dwc);
/* Enable interrupts */
channel_set_bit(dw, MASK.XFER, dwc->mask);
@@ -215,37 +150,6 @@ static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
cpu_relax();
}
-static u32 bytes2block(struct dw_dma_chan *dwc, size_t bytes,
- unsigned int width, size_t *len)
-{
- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
- u32 block;
-
- /* Always in bytes for iDMA 32-bit */
- if (dw->pdata->is_idma32)
- width = 0;
-
- if ((bytes >> width) > dwc->block_size) {
- block = dwc->block_size;
- *len = block << width;
- } else {
- block = bytes >> width;
- *len = bytes;
- }
-
- return block;
-}
-
-static size_t block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
-{
- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-
- if (dw->pdata->is_idma32)
- return IDMA32C_CTLH_BLOCK_TS(block);
-
- return DWC_CTLH_BLOCK_TS(block) << width;
-}
-
/*----------------------------------------------------------------------*/
/* Perform single block transfer */
@@ -391,10 +295,11 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
/* Returns how many bytes were already received from source */
static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
{
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
u32 ctlhi = channel_readl(dwc, CTL_HI);
u32 ctllo = channel_readl(dwc, CTL_LO);
- return block2bytes(dwc, ctlhi, ctllo >> 4 & 7);
+ return dw->block2bytes(dwc, ctlhi, ctllo >> 4 & 7);
}
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -651,7 +556,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
unsigned int src_width;
unsigned int dst_width;
unsigned int data_width = dw->pdata->data_width[m_master];
- u32 ctllo;
+ u32 ctllo, ctlhi;
u8 lms = DWC_LLP_LMS(m_master);
dev_vdbg(chan2dev(chan),
@@ -667,7 +572,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
src_width = dst_width = __ffs(data_width | src | dest | len);
- ctllo = DWC_DEFAULT_CTLLO(chan)
+ ctllo = dw->prepare_ctllo(dwc)
| DWC_CTLL_DST_WIDTH(dst_width)
| DWC_CTLL_SRC_WIDTH(src_width)
| DWC_CTLL_DST_INC
@@ -680,10 +585,12 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
if (!desc)
goto err_desc_get;
+ ctlhi = dw->bytes2block(dwc, len - offset, src_width, &xfer_count);
+
lli_write(desc, sar, src + offset);
lli_write(desc, dar, dest + offset);
lli_write(desc, ctllo, ctllo);
- lli_write(desc, ctlhi, bytes2block(dwc, len - offset, src_width, &xfer_count));
+ lli_write(desc, ctlhi, ctlhi);
desc->len = xfer_count;
if (!first) {
@@ -721,7 +628,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct dma_slave_config *sconfig = &dwc->dma_sconfig;
struct dw_desc *prev;
struct dw_desc *first;
- u32 ctllo;
+ u32 ctllo, ctlhi;
u8 m_master = dwc->dws.m_master;
u8 lms = DWC_LLP_LMS(m_master);
dma_addr_t reg;
@@ -745,10 +652,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
case DMA_MEM_TO_DEV:
reg_width = __ffs(sconfig->dst_addr_width);
reg = sconfig->dst_addr;
- ctllo = (DWC_DEFAULT_CTLLO(chan)
+ ctllo = dw->prepare_ctllo(dwc)
| DWC_CTLL_DST_WIDTH(reg_width)
| DWC_CTLL_DST_FIX
- | DWC_CTLL_SRC_INC);
+ | DWC_CTLL_SRC_INC;
ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
DWC_CTLL_FC(DW_DMA_FC_D_M2P);
@@ -768,9 +675,11 @@ slave_sg_todev_fill_desc:
if (!desc)
goto err_desc_get;
+ ctlhi = dw->bytes2block(dwc, len, mem_width, &dlen);
+
lli_write(desc, sar, mem);
lli_write(desc, dar, reg);
- lli_write(desc, ctlhi, bytes2block(dwc, len, mem_width, &dlen));
+ lli_write(desc, ctlhi, ctlhi);
lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
desc->len = dlen;
@@ -793,10 +702,10 @@ slave_sg_todev_fill_desc:
case DMA_DEV_TO_MEM:
reg_width = __ffs(sconfig->src_addr_width);
reg = sconfig->src_addr;
- ctllo = (DWC_DEFAULT_CTLLO(chan)
+ ctllo = dw->prepare_ctllo(dwc)
| DWC_CTLL_SRC_WIDTH(reg_width)
| DWC_CTLL_DST_INC
- | DWC_CTLL_SRC_FIX);
+ | DWC_CTLL_SRC_FIX;
ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
DWC_CTLL_FC(DW_DMA_FC_D_P2M);
@@ -814,9 +723,11 @@ slave_sg_fromdev_fill_desc:
if (!desc)
goto err_desc_get;
+ ctlhi = dw->bytes2block(dwc, len, reg_width, &dlen);
+
lli_write(desc, sar, reg);
lli_write(desc, dar, mem);
- lli_write(desc, ctlhi, bytes2block(dwc, len, reg_width, &dlen));
+ lli_write(desc, ctlhi, ctlhi);
mem_width = __ffs(data_width | mem | dlen);
lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
desc->len = dlen;
@@ -876,22 +787,12 @@ EXPORT_SYMBOL_GPL(dw_dma_filter);
static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct dma_slave_config *sc = &dwc->dma_sconfig;
struct dw_dma *dw = to_dw_dma(chan->device);
- /*
- * Fix sconfig's burst size according to dw_dmac. We need to convert
- * them as:
- * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
- *
- * NOTE: burst size 2 is not supported by DesignWare controller.
- * iDMA 32-bit supports it.
- */
- u32 s = dw->pdata->is_idma32 ? 1 : 2;
memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
- sc->src_maxburst = sc->src_maxburst > 1 ? fls(sc->src_maxburst) - s : 0;
- sc->dst_maxburst = sc->dst_maxburst > 1 ? fls(sc->dst_maxburst) - s : 0;
+ dw->encode_maxburst(dwc, &dwc->dma_sconfig.src_maxburst);
+ dw->encode_maxburst(dwc, &dwc->dma_sconfig.dst_maxburst);
return 0;
}
@@ -900,16 +801,9 @@ static void dwc_chan_pause(struct dw_dma_chan *dwc, bool drain)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
unsigned int count = 20; /* timeout iterations */
- u32 cfglo;
- cfglo = channel_readl(dwc, CFG_LO);
- if (dw->pdata->is_idma32) {
- if (drain)
- cfglo |= IDMA32C_CFGL_CH_DRAIN;
- else
- cfglo &= ~IDMA32C_CFGL_CH_DRAIN;
- }
- channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+ dw->suspend_chan(dwc, drain);
+
while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
udelay(2);
@@ -928,11 +822,11 @@ static int dwc_pause(struct dma_chan *chan)
return 0;
}
-static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
+static inline void dwc_chan_resume(struct dw_dma_chan *dwc, bool drain)
{
- u32 cfglo = channel_readl(dwc, CFG_LO);
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
- channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+ dw->resume_chan(dwc, drain);
clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
}
@@ -945,7 +839,7 @@ static int dwc_resume(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags);
if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
- dwc_chan_resume(dwc);
+ dwc_chan_resume(dwc, false);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -968,7 +862,7 @@ static int dwc_terminate_all(struct dma_chan *chan)
dwc_chan_disable(dw, dwc);
- dwc_chan_resume(dwc);
+ dwc_chan_resume(dwc, true);
/* active_list entries will end up before queued entries */
list_splice_init(&dwc->queue, &list);
@@ -1058,33 +952,7 @@ static void dwc_issue_pending(struct dma_chan *chan)
/*----------------------------------------------------------------------*/
-/*
- * Program FIFO size of channels.
- *
- * By default full FIFO (512 bytes) is assigned to channel 0. Here we
- * slice FIFO on equal parts between channels.
- */
-static void idma32_fifo_partition(struct dw_dma *dw)
-{
- u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) |
- IDMA32C_FP_UPDATE;
- u64 fifo_partition = 0;
-
- if (!dw->pdata->is_idma32)
- return;
-
- /* Fill FIFO_PARTITION low bits (Channels 0..1, 4..5) */
- fifo_partition |= value << 0;
-
- /* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */
- fifo_partition |= value << 32;
-
- /* Program FIFO Partition registers - 64 bytes per channel */
- idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);
- idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);
-}
-
-static void dw_dma_off(struct dw_dma *dw)
+void do_dw_dma_off(struct dw_dma *dw)
{
unsigned int i;
@@ -1103,7 +971,7 @@ static void dw_dma_off(struct dw_dma *dw)
clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags);
}
-static void dw_dma_on(struct dw_dma *dw)
+void do_dw_dma_on(struct dw_dma *dw)
{
dma_writel(dw, CFG, DW_CFG_DMA_EN);
}
@@ -1139,7 +1007,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
/* Enable controller here if needed */
if (!dw->in_use)
- dw_dma_on(dw);
+ do_dw_dma_on(dw);
dw->in_use |= dwc->mask;
return 0;
@@ -1150,7 +1018,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
unsigned long flags;
- LIST_HEAD(list);
dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
dwc->descs_allocated);
@@ -1177,30 +1044,25 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
/* Disable controller in case it was a last user */
dw->in_use &= ~dwc->mask;
if (!dw->in_use)
- dw_dma_off(dw);
+ do_dw_dma_off(dw);
dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
-int dw_dma_probe(struct dw_dma_chip *chip)
+int do_dma_probe(struct dw_dma_chip *chip)
{
+ struct dw_dma *dw = chip->dw;
struct dw_dma_platform_data *pdata;
- struct dw_dma *dw;
bool autocfg = false;
unsigned int dw_params;
unsigned int i;
int err;
- dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
- if (!dw)
- return -ENOMEM;
-
dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
if (!dw->pdata)
return -ENOMEM;
dw->regs = chip->regs;
- chip->dw = dw;
pm_runtime_get_sync(chip->dev);
@@ -1227,8 +1089,6 @@ int dw_dma_probe(struct dw_dma_chip *chip)
pdata->block_size = dma_readl(dw, MAX_BLK_SIZE);
/* Fill platform data with the default values */
- pdata->is_private = true;
- pdata->is_memcpy = true;
pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
} else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
@@ -1252,15 +1112,10 @@ int dw_dma_probe(struct dw_dma_chip *chip)
dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
/* Force dma off, just in case */
- dw_dma_off(dw);
-
- idma32_fifo_partition(dw);
+ dw->disable(dw);
/* Device and instance ID for IRQ and DMA pool */
- if (pdata->is_idma32)
- snprintf(dw->name, sizeof(dw->name), "idma32:dmac%d", chip->id);
- else
- snprintf(dw->name, sizeof(dw->name), "dw:dmac%d", chip->id);
+ dw->set_device_name(dw, chip->id);
/* Create a pool of consistent memory blocks for hardware descriptors */
dw->desc_pool = dmam_pool_create(dw->name, chip->dev,
@@ -1340,10 +1195,8 @@ int dw_dma_probe(struct dw_dma_chip *chip)
/* Set capabilities */
dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
- if (pdata->is_private)
- dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
- if (pdata->is_memcpy)
- dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
+ dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
+ dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
dw->dma.dev = chip->dev;
dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
@@ -1384,16 +1237,15 @@ err_pdata:
pm_runtime_put_sync_suspend(chip->dev);
return err;
}
-EXPORT_SYMBOL_GPL(dw_dma_probe);
-int dw_dma_remove(struct dw_dma_chip *chip)
+int do_dma_remove(struct dw_dma_chip *chip)
{
struct dw_dma *dw = chip->dw;
struct dw_dma_chan *dwc, *_dwc;
pm_runtime_get_sync(chip->dev);
- dw_dma_off(dw);
+ do_dw_dma_off(dw);
dma_async_device_unregister(&dw->dma);
free_irq(chip->irq, dw);
@@ -1408,27 +1260,24 @@ int dw_dma_remove(struct dw_dma_chip *chip)
pm_runtime_put_sync_suspend(chip->dev);
return 0;
}
-EXPORT_SYMBOL_GPL(dw_dma_remove);
-int dw_dma_disable(struct dw_dma_chip *chip)
+int do_dw_dma_disable(struct dw_dma_chip *chip)
{
struct dw_dma *dw = chip->dw;
- dw_dma_off(dw);
+ dw->disable(dw);
return 0;
}
-EXPORT_SYMBOL_GPL(dw_dma_disable);
+EXPORT_SYMBOL_GPL(do_dw_dma_disable);
-int dw_dma_enable(struct dw_dma_chip *chip)
+int do_dw_dma_enable(struct dw_dma_chip *chip)
{
struct dw_dma *dw = chip->dw;
- idma32_fifo_partition(dw);
-
- dw_dma_on(dw);
+ dw->enable(dw);
return 0;
}
-EXPORT_SYMBOL_GPL(dw_dma_enable);
+EXPORT_SYMBOL_GPL(do_dw_dma_enable);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
diff --git a/drivers/dma/dw/dw.c b/drivers/dma/dw/dw.c
new file mode 100644
index 000000000000..7a085b3c1854
--- /dev/null
+++ b/drivers/dma/dw/dw.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2007-2008 Atmel Corporation
+// Copyright (C) 2010-2011 ST Microelectronics
+// Copyright (C) 2013,2018 Intel Corporation
+
+#include <linux/bitops.h>
+#include <linux/dmaengine.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "internal.h"
+
+static void dw_dma_initialize_chan(struct dw_dma_chan *dwc)
+{
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ u32 cfghi = DWC_CFGH_FIFO_MODE;
+ u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+ bool hs_polarity = dwc->dws.hs_polarity;
+
+ cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id);
+ cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id);
+ cfghi |= DWC_CFGH_PROTCTL(dw->pdata->protctl);
+
+ /* Set polarity of handshake interface */
+ cfglo |= hs_polarity ? DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0;
+
+ channel_writel(dwc, CFG_LO, cfglo);
+ channel_writel(dwc, CFG_HI, cfghi);
+}
+
+static void dw_dma_suspend_chan(struct dw_dma_chan *dwc, bool drain)
+{
+ u32 cfglo = channel_readl(dwc, CFG_LO);
+
+ channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+}
+
+static void dw_dma_resume_chan(struct dw_dma_chan *dwc, bool drain)
+{
+ u32 cfglo = channel_readl(dwc, CFG_LO);
+
+ channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+}
+
+static u32 dw_dma_bytes2block(struct dw_dma_chan *dwc,
+ size_t bytes, unsigned int width, size_t *len)
+{
+ u32 block;
+
+ if ((bytes >> width) > dwc->block_size) {
+ block = dwc->block_size;
+ *len = dwc->block_size << width;
+ } else {
+ block = bytes >> width;
+ *len = bytes;
+ }
+
+ return block;
+}
+
+static size_t dw_dma_block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
+{
+ return DWC_CTLH_BLOCK_TS(block) << width;
+}
+
+static u32 dw_dma_prepare_ctllo(struct dw_dma_chan *dwc)
+{
+ struct dma_slave_config *sconfig = &dwc->dma_sconfig;
+ bool is_slave = is_slave_direction(dwc->direction);
+ u8 smsize = is_slave ? sconfig->src_maxburst : DW_DMA_MSIZE_16;
+ u8 dmsize = is_slave ? sconfig->dst_maxburst : DW_DMA_MSIZE_16;
+ u8 p_master = dwc->dws.p_master;
+ u8 m_master = dwc->dws.m_master;
+ u8 dms = (dwc->direction == DMA_MEM_TO_DEV) ? p_master : m_master;
+ u8 sms = (dwc->direction == DMA_DEV_TO_MEM) ? p_master : m_master;
+
+ return DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN |
+ DWC_CTLL_DST_MSIZE(dmsize) | DWC_CTLL_SRC_MSIZE(smsize) |
+ DWC_CTLL_DMS(dms) | DWC_CTLL_SMS(sms);
+}
+
+static void dw_dma_encode_maxburst(struct dw_dma_chan *dwc, u32 *maxburst)
+{
+ /*
+ * Fix burst size according to dw_dmac. We need to convert them as:
+ * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
+ */
+ *maxburst = *maxburst > 1 ? fls(*maxburst) - 2 : 0;
+}
+
+static void dw_dma_set_device_name(struct dw_dma *dw, int id)
+{
+ snprintf(dw->name, sizeof(dw->name), "dw:dmac%d", id);
+}
+
+static void dw_dma_disable(struct dw_dma *dw)
+{
+ do_dw_dma_off(dw);
+}
+
+static void dw_dma_enable(struct dw_dma *dw)
+{
+ do_dw_dma_on(dw);
+}
+
+int dw_dma_probe(struct dw_dma_chip *chip)
+{
+ struct dw_dma *dw;
+
+ dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
+ if (!dw)
+ return -ENOMEM;
+
+ /* Channel operations */
+ dw->initialize_chan = dw_dma_initialize_chan;
+ dw->suspend_chan = dw_dma_suspend_chan;
+ dw->resume_chan = dw_dma_resume_chan;
+ dw->prepare_ctllo = dw_dma_prepare_ctllo;
+ dw->encode_maxburst = dw_dma_encode_maxburst;
+ dw->bytes2block = dw_dma_bytes2block;
+ dw->block2bytes = dw_dma_block2bytes;
+
+ /* Device operations */
+ dw->set_device_name = dw_dma_set_device_name;
+ dw->disable = dw_dma_disable;
+ dw->enable = dw_dma_enable;
+
+ chip->dw = dw;
+ return do_dma_probe(chip);
+}
+EXPORT_SYMBOL_GPL(dw_dma_probe);
+
+int dw_dma_remove(struct dw_dma_chip *chip)
+{
+ return do_dma_remove(chip);
+}
+EXPORT_SYMBOL_GPL(dw_dma_remove);
diff --git a/drivers/dma/dw/idma32.c b/drivers/dma/dw/idma32.c
new file mode 100644
index 000000000000..f00657308811
--- /dev/null
+++ b/drivers/dma/dw/idma32.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013,2018 Intel Corporation
+
+#include <linux/bitops.h>
+#include <linux/dmaengine.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "internal.h"
+
+static void idma32_initialize_chan(struct dw_dma_chan *dwc)
+{
+ u32 cfghi = 0;
+ u32 cfglo = 0;
+
+ /* Set default burst alignment */
+ cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN;
+
+ /* Low 4 bits of the request lines */
+ cfghi |= IDMA32C_CFGH_DST_PER(dwc->dws.dst_id & 0xf);
+ cfghi |= IDMA32C_CFGH_SRC_PER(dwc->dws.src_id & 0xf);
+
+ /* Request line extension (2 bits) */
+ cfghi |= IDMA32C_CFGH_DST_PER_EXT(dwc->dws.dst_id >> 4 & 0x3);
+ cfghi |= IDMA32C_CFGH_SRC_PER_EXT(dwc->dws.src_id >> 4 & 0x3);
+
+ channel_writel(dwc, CFG_LO, cfglo);
+ channel_writel(dwc, CFG_HI, cfghi);
+}
+
+static void idma32_suspend_chan(struct dw_dma_chan *dwc, bool drain)
+{
+ u32 cfglo = channel_readl(dwc, CFG_LO);
+
+ if (drain)
+ cfglo |= IDMA32C_CFGL_CH_DRAIN;
+
+ channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+}
+
+static void idma32_resume_chan(struct dw_dma_chan *dwc, bool drain)
+{
+ u32 cfglo = channel_readl(dwc, CFG_LO);
+
+ if (drain)
+ cfglo &= ~IDMA32C_CFGL_CH_DRAIN;
+
+ channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+}
+
+static u32 idma32_bytes2block(struct dw_dma_chan *dwc,
+ size_t bytes, unsigned int width, size_t *len)
+{
+ u32 block;
+
+ if (bytes > dwc->block_size) {
+ block = dwc->block_size;
+ *len = dwc->block_size;
+ } else {
+ block = bytes;
+ *len = bytes;
+ }
+
+ return block;
+}
+
+static size_t idma32_block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
+{
+ return IDMA32C_CTLH_BLOCK_TS(block);
+}
+
+static u32 idma32_prepare_ctllo(struct dw_dma_chan *dwc)
+{
+ struct dma_slave_config *sconfig = &dwc->dma_sconfig;
+ bool is_slave = is_slave_direction(dwc->direction);
+ u8 smsize = is_slave ? sconfig->src_maxburst : IDMA32_MSIZE_8;
+ u8 dmsize = is_slave ? sconfig->dst_maxburst : IDMA32_MSIZE_8;
+
+ return DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN |
+ DWC_CTLL_DST_MSIZE(dmsize) | DWC_CTLL_SRC_MSIZE(smsize);
+}
+
+static void idma32_encode_maxburst(struct dw_dma_chan *dwc, u32 *maxburst)
+{
+ *maxburst = *maxburst > 1 ? fls(*maxburst) - 1 : 0;
+}
+
+static void idma32_set_device_name(struct dw_dma *dw, int id)
+{
+ snprintf(dw->name, sizeof(dw->name), "idma32:dmac%d", id);
+}
+
+/*
+ * Program FIFO size of channels.
+ *
+ * By default full FIFO (512 bytes) is assigned to channel 0. Here we
+ * slice FIFO on equal parts between channels.
+ */
+static void idma32_fifo_partition(struct dw_dma *dw)
+{
+ u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) |
+ IDMA32C_FP_UPDATE;
+ u64 fifo_partition = 0;
+
+ /* Fill FIFO_PARTITION low bits (Channels 0..1, 4..5) */
+ fifo_partition |= value << 0;
+
+ /* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */
+ fifo_partition |= value << 32;
+
+ /* Program FIFO Partition registers - 64 bytes per channel */
+ idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);
+ idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);
+}
+
+static void idma32_disable(struct dw_dma *dw)
+{
+ do_dw_dma_off(dw);
+ idma32_fifo_partition(dw);
+}
+
+static void idma32_enable(struct dw_dma *dw)
+{
+ idma32_fifo_partition(dw);
+ do_dw_dma_on(dw);
+}
+
+int idma32_dma_probe(struct dw_dma_chip *chip)
+{
+ struct dw_dma *dw;
+
+ dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
+ if (!dw)
+ return -ENOMEM;
+
+ /* Channel operations */
+ dw->initialize_chan = idma32_initialize_chan;
+ dw->suspend_chan = idma32_suspend_chan;
+ dw->resume_chan = idma32_resume_chan;
+ dw->prepare_ctllo = idma32_prepare_ctllo;
+ dw->encode_maxburst = idma32_encode_maxburst;
+ dw->bytes2block = idma32_bytes2block;
+ dw->block2bytes = idma32_block2bytes;
+
+ /* Device operations */
+ dw->set_device_name = idma32_set_device_name;
+ dw->disable = idma32_disable;
+ dw->enable = idma32_enable;
+
+ chip->dw = dw;
+ return do_dma_probe(chip);
+}
+EXPORT_SYMBOL_GPL(idma32_dma_probe);
+
+int idma32_dma_remove(struct dw_dma_chip *chip)
+{
+ return do_dma_remove(chip);
+}
+EXPORT_SYMBOL_GPL(idma32_dma_remove);
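The two encode_maxburst callbacks above differ only in the supported burst sizes: the DesignWare controller has no burst of 2, so dw_dma_encode_maxburst() maps with fls(x) - 2, while iDMA 32-bit supports it and maps with fls(x) - 1 onto the idma32_msize values. Worked out for the usual burst lengths:

/*
 * dw_dma_encode_maxburst():  1 -> 0,  4 -> fls(4) - 2 = 1,
 *                            8 -> fls(8) - 2 = 2,  16 -> fls(16) - 2 = 3
 *
 * idma32_encode_maxburst():  1 -> 0 (IDMA32_MSIZE_1),  2 -> fls(2) - 1 = 1,
 *                            4 -> 2,  8 -> 3,  16 -> 4,  32 -> 5 (IDMA32_MSIZE_32)
 */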
diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h
index 41439732ff6b..1dd7a4e6dd23 100644
--- a/drivers/dma/dw/internal.h
+++ b/drivers/dma/dw/internal.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for the Synopsys DesignWare DMA Controller
*
* Copyright (C) 2013 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _DMA_DW_INTERNAL_H
@@ -15,8 +12,14 @@
#include "regs.h"
-int dw_dma_disable(struct dw_dma_chip *chip);
-int dw_dma_enable(struct dw_dma_chip *chip);
+int do_dma_probe(struct dw_dma_chip *chip);
+int do_dma_remove(struct dw_dma_chip *chip);
+
+void do_dw_dma_on(struct dw_dma *dw);
+void do_dw_dma_off(struct dw_dma *dw);
+
+int do_dw_dma_disable(struct dw_dma_chip *chip);
+int do_dw_dma_enable(struct dw_dma_chip *chip);
extern bool dw_dma_filter(struct dma_chan *chan, void *param);
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index 7778ed705a1a..e79a75db0852 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PCI driver for the Synopsys DesignWare DMA Controller
*
* Copyright (C) 2013 Intel Corporation
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
@@ -15,21 +12,33 @@
#include "internal.h"
-static struct dw_dma_platform_data mrfld_pdata = {
+struct dw_dma_pci_data {
+ const struct dw_dma_platform_data *pdata;
+ int (*probe)(struct dw_dma_chip *chip);
+};
+
+static const struct dw_dma_pci_data dw_pci_data = {
+ .probe = dw_dma_probe,
+};
+
+static const struct dw_dma_platform_data idma32_pdata = {
.nr_channels = 8,
- .is_private = true,
- .is_memcpy = true,
- .is_idma32 = true,
.chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
.chan_priority = CHAN_PRIORITY_ASCENDING,
.block_size = 131071,
.nr_masters = 1,
.data_width = {4},
+ .multi_block = {1, 1, 1, 1, 1, 1, 1, 1},
+};
+
+static const struct dw_dma_pci_data idma32_pci_data = {
+ .pdata = &idma32_pdata,
+ .probe = idma32_dma_probe,
};
static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
- const struct dw_dma_platform_data *pdata = (void *)pid->driver_data;
+ const struct dw_dma_pci_data *data = (void *)pid->driver_data;
struct dw_dma_chip *chip;
int ret;
@@ -62,9 +71,9 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
chip->id = pdev->devfn;
chip->regs = pcim_iomap_table(pdev)[0];
chip->irq = pdev->irq;
- chip->pdata = pdata;
+ chip->pdata = data->pdata;
- ret = dw_dma_probe(chip);
+ ret = data->probe(chip);
if (ret)
return ret;
@@ -90,7 +99,7 @@ static int dw_pci_suspend_late(struct device *dev)
struct pci_dev *pci = to_pci_dev(dev);
struct dw_dma_chip *chip = pci_get_drvdata(pci);
- return dw_dma_disable(chip);
+ return do_dw_dma_disable(chip);
};
static int dw_pci_resume_early(struct device *dev)
@@ -98,7 +107,7 @@ static int dw_pci_resume_early(struct device *dev)
struct pci_dev *pci = to_pci_dev(dev);
struct dw_dma_chip *chip = pci_get_drvdata(pci);
- return dw_dma_enable(chip);
+ return do_dw_dma_enable(chip);
};
#endif /* CONFIG_PM_SLEEP */
@@ -109,24 +118,24 @@ static const struct dev_pm_ops dw_pci_dev_pm_ops = {
static const struct pci_device_id dw_pci_id_table[] = {
/* Medfield (GPDMA) */
- { PCI_VDEVICE(INTEL, 0x0827) },
+ { PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_data },
/* BayTrail */
- { PCI_VDEVICE(INTEL, 0x0f06) },
- { PCI_VDEVICE(INTEL, 0x0f40) },
+ { PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_data },
+ { PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_data },
- /* Merrifield iDMA 32-bit (GPDMA) */
- { PCI_VDEVICE(INTEL, 0x11a2), (kernel_ulong_t)&mrfld_pdata },
+ /* Merrifield */
+ { PCI_VDEVICE(INTEL, 0x11a2), (kernel_ulong_t)&idma32_pci_data },
/* Braswell */
- { PCI_VDEVICE(INTEL, 0x2286) },
- { PCI_VDEVICE(INTEL, 0x22c0) },
+ { PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_pci_data },
+ { PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_pci_data },
/* Haswell */
- { PCI_VDEVICE(INTEL, 0x9c60) },
+ { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_data },
/* Broadwell */
- { PCI_VDEVICE(INTEL, 0x9ce0) },
+ { PCI_VDEVICE(INTEL, 0x9ce0), (kernel_ulong_t)&dw_pci_data },
{ }
};
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 31ff8113c3de..382dfd9e9600 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Platform driver for the Synopsys DesignWare DMA Controller
*
@@ -6,10 +7,6 @@
* Copyright (C) 2013 Intel Corporation
*
* Some parts of this driver are derived from the original dw_dmac.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
@@ -128,15 +125,6 @@ dw_dma_parse_dt(struct platform_device *pdev)
pdata->nr_masters = nr_masters;
pdata->nr_channels = nr_channels;
- if (of_property_read_bool(np, "is_private"))
- pdata->is_private = true;
-
- /*
- * All known devices, which use DT for configuration, support
- * memory-to-memory transfers. So enable it by default.
- */
- pdata->is_memcpy = true;
-
if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
pdata->chan_allocation_order = (unsigned char)tmp;
@@ -264,7 +252,7 @@ static void dw_shutdown(struct platform_device *pdev)
struct dw_dma_chip *chip = platform_get_drvdata(pdev);
/*
- * We have to call dw_dma_disable() to stop any ongoing transfer. On
+ * We have to call do_dw_dma_disable() to stop any ongoing transfer. On
* some platforms we can't do that since DMA device is powered off.
* Moreover we have no possibility to check if the platform is affected
* or not. That's why we call pm_runtime_get_sync() / pm_runtime_put()
@@ -273,7 +261,7 @@ static void dw_shutdown(struct platform_device *pdev)
* used by the driver.
*/
pm_runtime_get_sync(chip->dev);
- dw_dma_disable(chip);
+ do_dw_dma_disable(chip);
pm_runtime_put_sync_suspend(chip->dev);
clk_disable_unprepare(chip->clk);
@@ -303,7 +291,7 @@ static int dw_suspend_late(struct device *dev)
{
struct dw_dma_chip *chip = dev_get_drvdata(dev);
- dw_dma_disable(chip);
+ do_dw_dma_disable(chip);
clk_disable_unprepare(chip->clk);
return 0;
@@ -318,7 +306,7 @@ static int dw_resume_early(struct device *dev)
if (ret)
return ret;
- return dw_dma_enable(chip);
+ return do_dw_dma_enable(chip);
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 646c9c960c07..3fce66ecee7a 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for the Synopsys DesignWare AHB DMA Controller
*
* Copyright (C) 2005-2007 Atmel Corporation
* Copyright (C) 2010-2011 ST Microelectronics
* Copyright (C) 2016 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/bitops.h>
@@ -222,6 +219,16 @@ enum dw_dma_msize {
/* iDMA 32-bit support */
+/* bursts size */
+enum idma32_msize {
+ IDMA32_MSIZE_1,
+ IDMA32_MSIZE_2,
+ IDMA32_MSIZE_4,
+ IDMA32_MSIZE_8,
+ IDMA32_MSIZE_16,
+ IDMA32_MSIZE_32,
+};
+
/* Bitfields in CTL_HI */
#define IDMA32C_CTLH_BLOCK_TS_MASK GENMASK(16, 0)
#define IDMA32C_CTLH_BLOCK_TS(x) ((x) & IDMA32C_CTLH_BLOCK_TS_MASK)
@@ -312,6 +319,21 @@ struct dw_dma {
u8 all_chan_mask;
u8 in_use;
+ /* Channel operations */
+ void (*initialize_chan)(struct dw_dma_chan *dwc);
+ void (*suspend_chan)(struct dw_dma_chan *dwc, bool drain);
+ void (*resume_chan)(struct dw_dma_chan *dwc, bool drain);
+ u32 (*prepare_ctllo)(struct dw_dma_chan *dwc);
+ void (*encode_maxburst)(struct dw_dma_chan *dwc, u32 *maxburst);
+ u32 (*bytes2block)(struct dw_dma_chan *dwc, size_t bytes,
+ unsigned int width, size_t *len);
+ size_t (*block2bytes)(struct dw_dma_chan *dwc, u32 block, u32 width);
+
+ /* Device operations */
+ void (*set_device_name)(struct dw_dma *dw, int id);
+ void (*disable)(struct dw_dma *dw);
+ void (*enable)(struct dw_dma *dw);
+
/* platform data */
struct dw_dma_platform_data *pdata;
};
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 8876c4c1bb2c..680b2a00a953 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -6,6 +6,7 @@
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/dma-mapping.h>
#include "fsl-edma-common.h"
@@ -173,12 +174,62 @@ int fsl_edma_resume(struct dma_chan *chan)
}
EXPORT_SYMBOL_GPL(fsl_edma_resume);
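+/*
+ * Undo the device-address mapping set up by fsl_edma_prep_slave_dma(),
+ * if one is currently active on this channel.
+ */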
+static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
+{
+ if (fsl_chan->dma_dir != DMA_NONE)
+ dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
+ fsl_chan->dma_dev_addr,
+ fsl_chan->dma_dev_size,
+ fsl_chan->dma_dir, 0);
+ fsl_chan->dma_dir = DMA_NONE;
+}
+
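+/*
+ * Map the slave device address from the channel configuration with
+ * dma_map_resource() and cache the mapping; later preparations in the
+ * same direction reuse it instead of remapping on every transfer.
+ */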
+static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
+ enum dma_transfer_direction dir)
+{
+ struct device *dev = fsl_chan->vchan.chan.device->dev;
+ enum dma_data_direction dma_dir;
+ phys_addr_t addr = 0;
+ u32 size = 0;
+
+ switch (dir) {
+ case DMA_MEM_TO_DEV:
+ dma_dir = DMA_FROM_DEVICE;
+ addr = fsl_chan->cfg.dst_addr;
+ size = fsl_chan->cfg.dst_maxburst;
+ break;
+ case DMA_DEV_TO_MEM:
+ dma_dir = DMA_TO_DEVICE;
+ addr = fsl_chan->cfg.src_addr;
+ size = fsl_chan->cfg.src_maxburst;
+ break;
+ default:
+ dma_dir = DMA_NONE;
+ break;
+ }
+
+ /* Already mapped for this config? */
+ if (fsl_chan->dma_dir == dma_dir)
+ return true;
+
+ fsl_edma_unprep_slave_dma(fsl_chan);
+
+ fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
+ if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
+ return false;
+ fsl_chan->dma_dev_size = size;
+ fsl_chan->dma_dir = dma_dir;
+
+ return true;
+}
+
int fsl_edma_slave_config(struct dma_chan *chan,
struct dma_slave_config *cfg)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
+ fsl_edma_unprep_slave_dma(fsl_chan);
return 0;
}
@@ -339,9 +390,7 @@ static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
struct fsl_edma_desc *fsl_desc;
int i;
- fsl_desc = kzalloc(sizeof(*fsl_desc) +
- sizeof(struct fsl_edma_sw_tcd) *
- sg_len, GFP_NOWAIT);
+ fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
if (!fsl_desc)
return NULL;
@@ -378,6 +427,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
if (!is_slave_direction(direction))
return NULL;
+ if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
+ return NULL;
+
sg_len = buf_len / period_len;
fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
if (!fsl_desc)
@@ -409,11 +461,11 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
if (direction == DMA_MEM_TO_DEV) {
src_addr = dma_buf_next;
- dst_addr = fsl_chan->cfg.dst_addr;
+ dst_addr = fsl_chan->dma_dev_addr;
soff = fsl_chan->cfg.dst_addr_width;
doff = 0;
} else {
- src_addr = fsl_chan->cfg.src_addr;
+ src_addr = fsl_chan->dma_dev_addr;
dst_addr = dma_buf_next;
soff = 0;
doff = fsl_chan->cfg.src_addr_width;
@@ -444,6 +496,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
if (!is_slave_direction(direction))
return NULL;
+ if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
+ return NULL;
+
fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
if (!fsl_desc)
return NULL;
@@ -468,11 +523,11 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
if (direction == DMA_MEM_TO_DEV) {
src_addr = sg_dma_address(sg);
- dst_addr = fsl_chan->cfg.dst_addr;
+ dst_addr = fsl_chan->dma_dev_addr;
soff = fsl_chan->cfg.dst_addr_width;
doff = 0;
} else {
- src_addr = fsl_chan->cfg.src_addr;
+ src_addr = fsl_chan->dma_dev_addr;
dst_addr = sg_dma_address(sg);
soff = 0;
doff = fsl_chan->cfg.src_addr_width;
@@ -555,6 +610,7 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
fsl_edma_chan_mux(fsl_chan, 0, false);
fsl_chan->edesc = NULL;
vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+ fsl_edma_unprep_slave_dma(fsl_chan);
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 8917e8865959..b435d8e1e3a1 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -6,6 +6,7 @@
#ifndef _FSL_EDMA_COMMON_H_
#define _FSL_EDMA_COMMON_H_
+#include <linux/dma-direction.h>
#include "virt-dma.h"
#define EDMA_CR_EDBG BIT(1)
@@ -120,6 +121,9 @@ struct fsl_edma_chan {
struct dma_slave_config cfg;
u32 attr;
struct dma_pool *tcd_pool;
+ dma_addr_t dma_dev_addr;
+ u32 dma_dev_size;
+ enum dma_data_direction dma_dir;
};
struct fsl_edma_desc {
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 34d70112fcc9..75e8a7ba3a22 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -254,6 +254,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_chan->pm_state = RUNNING;
fsl_chan->slave_id = 0;
fsl_chan->idle = true;
+ fsl_chan->dma_dir = DMA_NONE;
fsl_chan->vchan.desc_free = fsl_edma_free_desc;
vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
new file mode 100644
index 000000000000..aa1d0ae3d207
--- /dev/null
+++ b/drivers/dma/fsl-qdma.c
@@ -0,0 +1,1259 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2014-2015 Freescale
+// Copyright 2018 NXP
+
+/*
+ * Driver for NXP Layerscape Queue Direct Memory Access Controller
+ *
+ * Author:
+ * Wen He <wen.he_1@nxp.com>
+ * Jiaheng Fan <jiaheng.fan@nxp.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/of_dma.h>
+#include <linux/dma-mapping.h>
+
+#include "virt-dma.h"
+#include "fsldma.h"
+
+/* Register related definition */
+#define FSL_QDMA_DMR 0x0
+#define FSL_QDMA_DSR 0x4
+#define FSL_QDMA_DEIER 0xe00
+#define FSL_QDMA_DEDR 0xe04
+#define FSL_QDMA_DECFDW0R 0xe10
+#define FSL_QDMA_DECFDW1R 0xe14
+#define FSL_QDMA_DECFDW2R 0xe18
+#define FSL_QDMA_DECFDW3R 0xe1c
+#define FSL_QDMA_DECFQIDR 0xe30
+#define FSL_QDMA_DECBR 0xe34
+
+#define FSL_QDMA_BCQMR(x) (0xc0 + 0x100 * (x))
+#define FSL_QDMA_BCQSR(x) (0xc4 + 0x100 * (x))
+#define FSL_QDMA_BCQEDPA_SADDR(x) (0xc8 + 0x100 * (x))
+#define FSL_QDMA_BCQDPA_SADDR(x) (0xcc + 0x100 * (x))
+#define FSL_QDMA_BCQEEPA_SADDR(x) (0xd0 + 0x100 * (x))
+#define FSL_QDMA_BCQEPA_SADDR(x) (0xd4 + 0x100 * (x))
+#define FSL_QDMA_BCQIER(x) (0xe0 + 0x100 * (x))
+#define FSL_QDMA_BCQIDR(x) (0xe4 + 0x100 * (x))
+
+#define FSL_QDMA_SQDPAR 0x80c
+#define FSL_QDMA_SQEPAR 0x814
+#define FSL_QDMA_BSQMR 0x800
+#define FSL_QDMA_BSQSR 0x804
+#define FSL_QDMA_BSQICR 0x828
+#define FSL_QDMA_CQMR 0xa00
+#define FSL_QDMA_CQDSCR1 0xa08
+#define FSL_QDMA_CQDSCR2 0xa0c
+#define FSL_QDMA_CQIER 0xa10
+#define FSL_QDMA_CQEDR 0xa14
+#define FSL_QDMA_SQCCMR 0xa20
+
+/* Registers for bit and genmask */
+#define FSL_QDMA_CQIDR_SQT BIT(15)
+#define QDMA_CCDF_FOTMAT BIT(29)
+#define QDMA_CCDF_SER BIT(30)
+#define QDMA_SG_FIN BIT(30)
+#define QDMA_SG_LEN_MASK GENMASK(29, 0)
+#define QDMA_CCDF_MASK GENMASK(28, 20)
+
+#define FSL_QDMA_DEDR_CLEAR GENMASK(31, 0)
+#define FSL_QDMA_BCQIDR_CLEAR GENMASK(31, 0)
+#define FSL_QDMA_DEIER_CLEAR GENMASK(31, 0)
+
+#define FSL_QDMA_BCQIER_CQTIE BIT(15)
+#define FSL_QDMA_BCQIER_CQPEIE BIT(23)
+#define FSL_QDMA_BSQICR_ICEN BIT(31)
+
+#define FSL_QDMA_BSQICR_ICST(x) ((x) << 16)
+#define FSL_QDMA_CQIER_MEIE BIT(31)
+#define FSL_QDMA_CQIER_TEIE BIT(0)
+#define FSL_QDMA_SQCCMR_ENTER_WM BIT(21)
+
+#define FSL_QDMA_BCQMR_EN BIT(31)
+#define FSL_QDMA_BCQMR_EI BIT(30)
+#define FSL_QDMA_BCQMR_CD_THLD(x) ((x) << 20)
+#define FSL_QDMA_BCQMR_CQ_SIZE(x) ((x) << 16)
+
+#define FSL_QDMA_BCQSR_QF BIT(16)
+#define FSL_QDMA_BCQSR_XOFF BIT(0)
+
+#define FSL_QDMA_BSQMR_EN BIT(31)
+#define FSL_QDMA_BSQMR_DI BIT(30)
+#define FSL_QDMA_BSQMR_CQ_SIZE(x) ((x) << 16)
+
+#define FSL_QDMA_BSQSR_QE BIT(17)
+
+#define FSL_QDMA_DMR_DQD BIT(30)
+#define FSL_QDMA_DSR_DB BIT(31)
+
+/* Size related definition */
+#define FSL_QDMA_QUEUE_MAX 8
+#define FSL_QDMA_COMMAND_BUFFER_SIZE 64
+#define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32
+#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64
+#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384
+#define FSL_QDMA_QUEUE_NUM_MAX 8
+
+/* Field definition for CMD */
+#define FSL_QDMA_CMD_RWTTYPE 0x4
+#define FSL_QDMA_CMD_LWC 0x2
+#define FSL_QDMA_CMD_RWTTYPE_OFFSET 28
+#define FSL_QDMA_CMD_NS_OFFSET 27
+#define FSL_QDMA_CMD_DQOS_OFFSET 24
+#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
+#define FSL_QDMA_CMD_DSEN_OFFSET 19
+#define FSL_QDMA_CMD_LWC_OFFSET 16
+
+/* Field definition for Descriptor offset */
+#define QDMA_CCDF_STATUS 20
+#define QDMA_CCDF_OFFSET 20
+
+/* Field definition for safe loop count */
+#define FSL_QDMA_HALT_COUNT 1500
+#define FSL_QDMA_MAX_SIZE 16385
+#define FSL_QDMA_COMP_TIMEOUT 1000
+#define FSL_COMMAND_QUEUE_OVERFLLOW 10
+
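+/* Offset of block x's register window from the shared block base */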
+#define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x) \
+ (((fsl_qdma_engine)->block_offset) * (x))
+
+/**
+ * struct fsl_qdma_format - This is the struct describing the compound
+ * descriptor format used by qDMA.
+ * @status: Command status and enqueue status notification.
+ * @cfg: Frame offset and frame format.
+ * @addr_lo: Lower 32 bits of the compound descriptor's
+ * 40-bit memory address.
+ * @addr_hi: Upper 8 bits of the same 40-bit memory address.
+ * @__reserved1: Reserved field.
+ * @cfg8b_w1: Compound descriptor command queue origin produced
+ * by qDMA and dynamic debug field.
+ * @data: Pointer to the 40-bit memory address; describes the
+ * DMA source and destination information.
+ */
+struct fsl_qdma_format {
+ __le32 status;
+ __le32 cfg;
+ union {
+ struct {
+ __le32 addr_lo;
+ u8 addr_hi;
+ u8 __reserved1[2];
+ u8 cfg8b_w1;
+ } __packed;
+ __le64 data;
+ };
+} __packed;
+
+/* qDMA status notification pre information */
+struct fsl_pre_status {
+ u64 addr;
+ u8 queue;
+};
+
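+/*
+ * Per-CPU record of the most recently handled status entry, used to
+ * detect duplicated status notifications while draining the status queue.
+ */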
+static DEFINE_PER_CPU(struct fsl_pre_status, pre);
+
+struct fsl_qdma_chan {
+ struct virt_dma_chan vchan;
+ struct virt_dma_desc vdesc;
+ enum dma_status status;
+ struct fsl_qdma_engine *qdma;
+ struct fsl_qdma_queue *queue;
+};
+
+struct fsl_qdma_queue {
+ struct fsl_qdma_format *virt_head;
+ struct fsl_qdma_format *virt_tail;
+ struct list_head comp_used;
+ struct list_head comp_free;
+ struct dma_pool *comp_pool;
+ struct dma_pool *desc_pool;
+ spinlock_t queue_lock;
+ dma_addr_t bus_addr;
+ u32 n_cq;
+ u32 id;
+ struct fsl_qdma_format *cq;
+ void __iomem *block_base;
+};
+
+struct fsl_qdma_comp {
+ dma_addr_t bus_addr;
+ dma_addr_t desc_bus_addr;
+ struct fsl_qdma_format *virt_addr;
+ struct fsl_qdma_format *desc_virt_addr;
+ struct fsl_qdma_chan *qchan;
+ struct virt_dma_desc vdesc;
+ struct list_head list;
+};
+
+struct fsl_qdma_engine {
+ struct dma_device dma_dev;
+ void __iomem *ctrl_base;
+ void __iomem *status_base;
+ void __iomem *block_base;
+ u32 n_chans;
+ u32 n_queues;
+ struct mutex fsl_qdma_mutex;
+ int error_irq;
+ int *queue_irq;
+ u32 feature;
+ struct fsl_qdma_queue *queue;
+ struct fsl_qdma_queue **status;
+ struct fsl_qdma_chan *chans;
+ int block_number;
+ int block_offset;
+ int irq_base;
+ int desc_allocated;
+
+};
+
+static inline u64
+qdma_ccdf_addr_get64(const struct fsl_qdma_format *ccdf)
+{
+ return le64_to_cpu(ccdf->data) & (U64_MAX >> 24);
+}
+
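+/* Store a 40-bit bus address as a low 32-bit word plus the upper 8 bits */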
+static inline void
+qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
+{
+ ccdf->addr_hi = upper_32_bits(addr);
+ ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr));
+}
+
+static inline u8
+qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
+{
+ return ccdf->cfg8b_w1 & U8_MAX;
+}
+
+static inline int
+qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
+{
+ return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET;
+}
+
+static inline void
+qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
+{
+ ccdf->cfg = cpu_to_le32(QDMA_CCDF_FOTMAT | offset);
+}
+
+static inline int
+qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
+{
+ return (le32_to_cpu(ccdf->status) & QDMA_CCDF_MASK) >> QDMA_CCDF_STATUS;
+}
+
+static inline void
+qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
+{
+ ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status);
+}
+
+static inline void qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
+{
+ csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK);
+}
+
+static inline void qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
+{
+ csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
+}
+
+static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
+{
+ return FSL_DMA_IN(qdma, addr, 32);
+}
+
+static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
+ void __iomem *addr)
+{
+ FSL_DMA_OUT(qdma, addr, val, 32);
+}
+
+static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct fsl_qdma_chan, vchan.chan);
+}
+
+static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct fsl_qdma_comp, vdesc);
+}
+
+static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
+{
+ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+ struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+ struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
+ struct fsl_qdma_comp *comp_temp, *_comp_temp;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+
+ if (!fsl_queue->comp_pool && !fsl_queue->desc_pool)
+ return;
+
+ list_for_each_entry_safe(comp_temp, _comp_temp,
+ &fsl_queue->comp_used, list) {
+ dma_pool_free(fsl_queue->comp_pool,
+ comp_temp->virt_addr,
+ comp_temp->bus_addr);
+ dma_pool_free(fsl_queue->desc_pool,
+ comp_temp->desc_virt_addr,
+ comp_temp->desc_bus_addr);
+ list_del(&comp_temp->list);
+ kfree(comp_temp);
+ }
+
+ list_for_each_entry_safe(comp_temp, _comp_temp,
+ &fsl_queue->comp_free, list) {
+ dma_pool_free(fsl_queue->comp_pool,
+ comp_temp->virt_addr,
+ comp_temp->bus_addr);
+ dma_pool_free(fsl_queue->desc_pool,
+ comp_temp->desc_virt_addr,
+ comp_temp->desc_bus_addr);
+ list_del(&comp_temp->list);
+ kfree(comp_temp);
+ }
+
+ dma_pool_destroy(fsl_queue->comp_pool);
+ dma_pool_destroy(fsl_queue->desc_pool);
+
+ fsl_qdma->desc_allocated--;
+ fsl_queue->comp_pool = NULL;
+ fsl_queue->desc_pool = NULL;
+}
+
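+/*
+ * Fill a pre-allocated compound descriptor set for a memcpy: the frame
+ * descriptor, the frame list (S/G) table with its source and destination
+ * entries, and the source/destination descriptor buffers.
+ */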
+static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
+ dma_addr_t dst, dma_addr_t src, u32 len)
+{
+ struct fsl_qdma_format *sdf, *ddf;
+ struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
+
+ ccdf = fsl_comp->virt_addr;
+ csgf_desc = fsl_comp->virt_addr + 1;
+ csgf_src = fsl_comp->virt_addr + 2;
+ csgf_dest = fsl_comp->virt_addr + 3;
+ sdf = fsl_comp->desc_virt_addr;
+ ddf = fsl_comp->desc_virt_addr + 1;
+
+ memset(fsl_comp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
+ memset(fsl_comp->desc_virt_addr, 0, FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
+ /* Head Command Descriptor(Frame Descriptor) */
+ qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
+ qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
+ qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
+ /* Status notification is enqueued to status queue. */
+ /* Compound Command Descriptor(Frame List Table) */
+ qdma_desc_addr_set64(csgf_desc, fsl_comp->desc_bus_addr);
+ /* The length must be 32 for a Compound S/G Descriptor */
+ qdma_csgf_set_len(csgf_desc, 32);
+ qdma_desc_addr_set64(csgf_src, src);
+ qdma_csgf_set_len(csgf_src, len);
+ qdma_desc_addr_set64(csgf_dest, dst);
+ qdma_csgf_set_len(csgf_dest, len);
+ /* This entry is the last entry. */
+ qdma_csgf_set_f(csgf_dest, len);
+ /* Descriptor Buffer */
+ sdf->data =
+ cpu_to_le64(FSL_QDMA_CMD_RWTTYPE <<
+ FSL_QDMA_CMD_RWTTYPE_OFFSET);
+ ddf->data =
+ cpu_to_le64(FSL_QDMA_CMD_RWTTYPE <<
+ FSL_QDMA_CMD_RWTTYPE_OFFSET);
+ ddf->data |=
+ cpu_to_le64(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
+}
+
+/*
+ * Pre-request full command descriptor for enqueue.
+ */
+static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
+{
+ int i;
+ struct fsl_qdma_comp *comp_temp, *_comp_temp;
+
+ for (i = 0; i < queue->n_cq + FSL_COMMAND_QUEUE_OVERFLLOW; i++) {
+ comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
+ if (!comp_temp)
+ goto err_alloc;
+ comp_temp->virt_addr =
+ dma_pool_alloc(queue->comp_pool, GFP_KERNEL,
+ &comp_temp->bus_addr);
+ if (!comp_temp->virt_addr)
+ goto err_dma_alloc;
+
+ comp_temp->desc_virt_addr =
+ dma_pool_alloc(queue->desc_pool, GFP_KERNEL,
+ &comp_temp->desc_bus_addr);
+ if (!comp_temp->desc_virt_addr)
+ goto err_desc_dma_alloc;
+
+ list_add_tail(&comp_temp->list, &queue->comp_free);
+ }
+
+ return 0;
+
+err_desc_dma_alloc:
+ dma_pool_free(queue->comp_pool, comp_temp->virt_addr,
+ comp_temp->bus_addr);
+
+err_dma_alloc:
+ kfree(comp_temp);
+
+err_alloc:
+ list_for_each_entry_safe(comp_temp, _comp_temp,
+ &queue->comp_free, list) {
+ if (comp_temp->virt_addr)
+ dma_pool_free(queue->comp_pool,
+ comp_temp->virt_addr,
+ comp_temp->bus_addr);
+ if (comp_temp->desc_virt_addr)
+ dma_pool_free(queue->desc_pool,
+ comp_temp->desc_virt_addr,
+ comp_temp->desc_bus_addr);
+
+ list_del(&comp_temp->list);
+ kfree(comp_temp);
+ }
+
+ return -ENOMEM;
+}
+
+/*
+ * Request a command descriptor for enqueue.
+ */
+static struct fsl_qdma_comp
+*fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
+{
+ unsigned long flags;
+ struct fsl_qdma_comp *comp_temp;
+ int timeout = FSL_QDMA_COMP_TIMEOUT;
+ struct fsl_qdma_queue *queue = fsl_chan->queue;
+
+ while (timeout--) {
+ spin_lock_irqsave(&queue->queue_lock, flags);
+ if (!list_empty(&queue->comp_free)) {
+ comp_temp = list_first_entry(&queue->comp_free,
+ struct fsl_qdma_comp,
+ list);
+ list_del(&comp_temp->list);
+
+ spin_unlock_irqrestore(&queue->queue_lock, flags);
+ comp_temp->qchan = fsl_chan;
+ return comp_temp;
+ }
+ spin_unlock_irqrestore(&queue->queue_lock, flags);
+ udelay(1);
+ }
+
+ return NULL;
+}
+
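+/*
+ * Allocate the command queue rings, one per queue per block, from
+ * coherent memory; the ring sizes come from the "queue-sizes" property.
+ */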
+static struct fsl_qdma_queue
+*fsl_qdma_alloc_queue_resources(struct platform_device *pdev,
+ struct fsl_qdma_engine *fsl_qdma)
+{
+ int ret, len, i, j;
+ int queue_num, block_number;
+ unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
+ struct fsl_qdma_queue *queue_head, *queue_temp;
+
+ queue_num = fsl_qdma->n_queues;
+ block_number = fsl_qdma->block_number;
+
+ if (queue_num > FSL_QDMA_QUEUE_MAX)
+ queue_num = FSL_QDMA_QUEUE_MAX;
+ len = sizeof(*queue_head) * queue_num * block_number;
+ queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ if (!queue_head)
+ return NULL;
+
+ ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
+ queue_size, queue_num);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get queue-sizes.\n");
+ return NULL;
+ }
+ for (j = 0; j < block_number; j++) {
+ for (i = 0; i < queue_num; i++) {
+ if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
+ queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
+ dev_err(&pdev->dev,
+ "Get wrong queue-sizes.\n");
+ return NULL;
+ }
+ queue_temp = queue_head + i + (j * queue_num);
+
+ queue_temp->cq =
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct fsl_qdma_format) *
+ queue_size[i],
+ &queue_temp->bus_addr,
+ GFP_KERNEL);
+ if (!queue_temp->cq)
+ return NULL;
+ queue_temp->block_base = fsl_qdma->block_base +
+ FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
+ queue_temp->n_cq = queue_size[i];
+ queue_temp->id = i;
+ queue_temp->virt_head = queue_temp->cq;
+ queue_temp->virt_tail = queue_temp->cq;
+ /*
+ * List for queue command buffer
+ */
+ INIT_LIST_HEAD(&queue_temp->comp_used);
+ spin_lock_init(&queue_temp->queue_lock);
+ }
+ }
+ return queue_head;
+}
+
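+/*
+ * Allocate a per-block status queue ring; its depth is taken from the
+ * "status-sizes" property.
+ */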
+static struct fsl_qdma_queue
+*fsl_qdma_prep_status_queue(struct platform_device *pdev)
+{
+ int ret;
+ unsigned int status_size;
+ struct fsl_qdma_queue *status_head;
+ struct device_node *np = pdev->dev.of_node;
+
+ ret = of_property_read_u32(np, "status-sizes", &status_size);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get status-sizes.\n");
+ return NULL;
+ }
+ if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
+ status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
+ dev_err(&pdev->dev, "Get wrong status_size.\n");
+ return NULL;
+ }
+ status_head = devm_kzalloc(&pdev->dev,
+ sizeof(*status_head), GFP_KERNEL);
+ if (!status_head)
+ return NULL;
+
+ /*
+ * Buffer for queue command
+ */
+ status_head->cq = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct fsl_qdma_format) *
+ status_size,
+ &status_head->bus_addr,
+ GFP_KERNEL);
+ if (!status_head->cq) {
+ devm_kfree(&pdev->dev, status_head);
+ return NULL;
+ }
+ status_head->n_cq = status_size;
+ status_head->virt_head = status_head->cq;
+ status_head->virt_tail = status_head->cq;
+ status_head->comp_pool = NULL;
+
+ return status_head;
+}
+
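+/*
+ * Quiesce the engine: disable all command queues, wait for the busy bit
+ * in DSR to clear, then disable the status queues and clear any pending
+ * queue interrupts.
+ */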
+static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
+{
+ u32 reg;
+ int i, j, count = FSL_QDMA_HALT_COUNT;
+ void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
+
+ /* Disable the command queue and wait for idle state. */
+ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
+ reg |= FSL_QDMA_DMR_DQD;
+ qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
+ for (j = 0; j < fsl_qdma->block_number; j++) {
+ block = fsl_qdma->block_base +
+ FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
+ for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
+ qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
+ }
+ while (1) {
+ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
+ if (!(reg & FSL_QDMA_DSR_DB))
+ break;
+ if (count-- < 0)
+ return -EBUSY;
+ udelay(100);
+ }
+
+ for (j = 0; j < fsl_qdma->block_number; j++) {
+ block = fsl_qdma->block_base +
+ FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
+
+ /* Disable status queue. */
+ qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
+
+ /*
+ * clear the command queue interrupt detect register for
+ * all queues.
+ */
+ qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
+ block + FSL_QDMA_BCQIDR(0));
+ }
+
+ return 0;
+}
+
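+/*
+ * Drain the block's status queue: match each completion against the head
+ * of the owning command queue's in-use list and complete the
+ * corresponding virt-dma descriptor.
+ */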
+static int
+fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
+ void *block,
+ int id)
+{
+ bool duplicate;
+ u32 reg, i, count;
+ struct fsl_qdma_queue *temp_queue;
+ struct fsl_qdma_format *status_addr;
+ struct fsl_qdma_comp *fsl_comp = NULL;
+ struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
+ struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];
+
+ count = FSL_QDMA_MAX_SIZE;
+
+ while (count--) {
+ duplicate = 0;
+ reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
+ if (reg & FSL_QDMA_BSQSR_QE)
+ return 0;
+
+ status_addr = fsl_status->virt_head;
+
+ if (qdma_ccdf_get_queue(status_addr) ==
+ __this_cpu_read(pre.queue) &&
+ qdma_ccdf_addr_get64(status_addr) ==
+ __this_cpu_read(pre.addr))
+ duplicate = 1;
+ i = qdma_ccdf_get_queue(status_addr) +
+ id * fsl_qdma->n_queues;
+ __this_cpu_write(pre.addr, qdma_ccdf_addr_get64(status_addr));
+ __this_cpu_write(pre.queue, qdma_ccdf_get_queue(status_addr));
+ temp_queue = fsl_queue + i;
+
+ spin_lock(&temp_queue->queue_lock);
+ if (list_empty(&temp_queue->comp_used)) {
+ if (!duplicate) {
+ spin_unlock(&temp_queue->queue_lock);
+ return -EAGAIN;
+ }
+ } else {
+ fsl_comp = list_first_entry(&temp_queue->comp_used,
+ struct fsl_qdma_comp, list);
+ if (fsl_comp->bus_addr + 16 !=
+ __this_cpu_read(pre.addr)) {
+ if (!duplicate) {
+ spin_unlock(&temp_queue->queue_lock);
+ return -EAGAIN;
+ }
+ }
+ }
+
+ if (duplicate) {
+ reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
+ reg |= FSL_QDMA_BSQMR_DI;
+ qdma_desc_addr_set64(status_addr, 0x0);
+ fsl_status->virt_head++;
+ if (fsl_status->virt_head == fsl_status->cq
+ + fsl_status->n_cq)
+ fsl_status->virt_head = fsl_status->cq;
+ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
+ spin_unlock(&temp_queue->queue_lock);
+ continue;
+ }
+ list_del(&fsl_comp->list);
+
+ reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
+ reg |= FSL_QDMA_BSQMR_DI;
+ qdma_desc_addr_set64(status_addr, 0x0);
+ fsl_status->virt_head++;
+ if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
+ fsl_status->virt_head = fsl_status->cq;
+ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
+ spin_unlock(&temp_queue->queue_lock);
+
+ spin_lock(&fsl_comp->qchan->vchan.lock);
+ vchan_cookie_complete(&fsl_comp->vdesc);
+ fsl_comp->qchan->status = DMA_COMPLETE;
+ spin_unlock(&fsl_comp->qchan->vchan.lock);
+ }
+
+ return 0;
+}
+
+static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
+{
+ unsigned int intr;
+ struct fsl_qdma_engine *fsl_qdma = dev_id;
+ void __iomem *status = fsl_qdma->status_base;
+
+ intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
+
+ if (intr) {
+ dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
+ return IRQ_NONE;
+ }
+
+ qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
+{
+ int id;
+ unsigned int intr, reg;
+ struct fsl_qdma_engine *fsl_qdma = dev_id;
+ void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
+
+ id = irq - fsl_qdma->irq_base;
+ if (id < 0 || id > fsl_qdma->block_number) {
+ dev_err(fsl_qdma->dma_dev.dev,
+ "irq %d is wrong, irq_base is %d\n",
+ irq, fsl_qdma->irq_base);
+ }
+
+ block = fsl_qdma->block_base +
+ FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
+
+ intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
+
+ if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
+ intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id);
+
+ if (intr != 0) {
+ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
+ reg |= FSL_QDMA_DMR_DQD;
+ qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
+ qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
+ dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
+ }
+
+ /* Clear all detected events and interrupts. */
+ qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
+ block + FSL_QDMA_BCQIDR(0));
+
+ return IRQ_HANDLED;
+}
+
+static int
+fsl_qdma_irq_init(struct platform_device *pdev,
+ struct fsl_qdma_engine *fsl_qdma)
+{
+ int i;
+ int cpu;
+ int ret;
+ char irq_name[20];
+
+ fsl_qdma->error_irq =
+ platform_get_irq_byname(pdev, "qdma-error");
+ if (fsl_qdma->error_irq < 0) {
+ dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
+ return fsl_qdma->error_irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
+ fsl_qdma_error_handler, 0,
+ "qDMA error", fsl_qdma);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
+ return ret;
+ }
+
+ for (i = 0; i < fsl_qdma->block_number; i++) {
+ sprintf(irq_name, "qdma-queue%d", i);
+ fsl_qdma->queue_irq[i] =
+ platform_get_irq_byname(pdev, irq_name);
+
+ if (fsl_qdma->queue_irq[i] < 0) {
+ dev_err(&pdev->dev,
+ "Can't get qdma queue %d irq.\n", i);
+ return fsl_qdma->queue_irq[i];
+ }
+
+ ret = devm_request_irq(&pdev->dev,
+ fsl_qdma->queue_irq[i],
+ fsl_qdma_queue_handler,
+ 0,
+ "qDMA queue",
+ fsl_qdma);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Can't register qDMA queue IRQ.\n");
+ return ret;
+ }
+
+ cpu = i % num_online_cpus();
+ ret = irq_set_affinity_hint(fsl_qdma->queue_irq[i],
+ get_cpu_mask(cpu));
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Can't set cpu %d affinity to IRQ %d.\n",
+ cpu,
+ fsl_qdma->queue_irq[i]);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void fsl_qdma_irq_exit(struct platform_device *pdev,
+ struct fsl_qdma_engine *fsl_qdma)
+{
+ int i;
+
+ devm_free_irq(&pdev->dev, fsl_qdma->error_irq, fsl_qdma);
+ for (i = 0; i < fsl_qdma->block_number; i++)
+ devm_free_irq(&pdev->dev, fsl_qdma->queue_irq[i], fsl_qdma);
+}
+
+static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
+{
+ u32 reg;
+ int i, j, ret;
+ struct fsl_qdma_queue *temp;
+ void __iomem *status = fsl_qdma->status_base;
+ void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
+ struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
+
+ /* Try to halt the qDMA engine first. */
+ ret = fsl_qdma_halt(fsl_qdma);
+ if (ret) {
+ dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
+ return ret;
+ }
+
+ for (i = 0; i < fsl_qdma->block_number; i++) {
+ /*
+ * Clear the command queue interrupt detect register for
+ * all queues.
+ */
+
+ block = fsl_qdma->block_base +
+ FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, i);
+ qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
+ block + FSL_QDMA_BCQIDR(0));
+ }
+
+ for (j = 0; j < fsl_qdma->block_number; j++) {
+ block = fsl_qdma->block_base +
+ FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
+ for (i = 0; i < fsl_qdma->n_queues; i++) {
+ temp = fsl_queue + i + (j * fsl_qdma->n_queues);
+ /*
+ * Initialize Command Queue registers to
+ * point to the first
+ * command descriptor in memory.
+ * Dequeue Pointer Address Registers
+ * Enqueue Pointer Address Registers
+ */
+
+ qdma_writel(fsl_qdma, temp->bus_addr,
+ block + FSL_QDMA_BCQDPA_SADDR(i));
+ qdma_writel(fsl_qdma, temp->bus_addr,
+ block + FSL_QDMA_BCQEPA_SADDR(i));
+
+ /* Initialize the queue mode. */
+ reg = FSL_QDMA_BCQMR_EN;
+ reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
+ reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
+ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
+ }
+
+ /*
+ * Workaround for erratum: ERR010812.
+ * We must enable XOFF to avoid enqueue rejections.
+ * Setting SQCCMR ENTER_WM to 0x20.
+ */
+
+ qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
+ block + FSL_QDMA_SQCCMR);
+
+ /*
+ * Initialize status queue registers to point to the first
+ * command descriptor in memory.
+ * Dequeue Pointer Address Registers
+ * Enqueue Pointer Address Registers
+ */
+
+ qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
+ block + FSL_QDMA_SQEPAR);
+ qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
+ block + FSL_QDMA_SQDPAR);
+ /* Initialize status queue interrupt. */
+ qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
+ block + FSL_QDMA_BCQIER(0));
+ qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN |
+ FSL_QDMA_BSQICR_ICST(5) | 0x8000,
+ block + FSL_QDMA_BSQICR);
+ qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE |
+ FSL_QDMA_CQIER_TEIE,
+ block + FSL_QDMA_CQIER);
+
+ /* Initialize the status queue mode. */
+ reg = FSL_QDMA_BSQMR_EN;
+ reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2
+ (fsl_qdma->status[j]->n_cq) - 6);
+
+ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
+ reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
+ }
+
+ /* Initialize controller interrupt register. */
+ qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
+ qdma_writel(fsl_qdma, FSL_QDMA_DEIER_CLEAR, status + FSL_QDMA_DEIER);
+
+ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
+ reg &= ~FSL_QDMA_DMR_DQD;
+ qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *
+fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct fsl_qdma_comp *fsl_comp;
+ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+
+ fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan);
+
+ if (!fsl_comp)
+ return NULL;
+
+ fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
+
+ return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
+}
+
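+/*
+ * Copy the next pending compound descriptor into the command queue ring,
+ * unless the queue reports full or XOFF, and set the EI bit in BCQMR to
+ * hand it to the hardware.
+ */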
+static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
+{
+ u32 reg;
+ struct virt_dma_desc *vdesc;
+ struct fsl_qdma_comp *fsl_comp;
+ struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+ void __iomem *block = fsl_queue->block_base;
+
+ reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
+ if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
+ return;
+ vdesc = vchan_next_desc(&fsl_chan->vchan);
+ if (!vdesc)
+ return;
+ list_del(&vdesc->node);
+ fsl_comp = to_fsl_qdma_comp(vdesc);
+
+ memcpy(fsl_queue->virt_head++,
+ fsl_comp->virt_addr, sizeof(struct fsl_qdma_format));
+ if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
+ fsl_queue->virt_head = fsl_queue->cq;
+
+ list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
+ barrier();
+ reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
+ reg |= FSL_QDMA_BCQMR_EI;
+ qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
+ fsl_chan->status = DMA_IN_PROGRESS;
+}
+
+static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
+{
+ unsigned long flags;
+ struct fsl_qdma_comp *fsl_comp;
+ struct fsl_qdma_queue *fsl_queue;
+
+ fsl_comp = to_fsl_qdma_comp(vdesc);
+ fsl_queue = fsl_comp->qchan->queue;
+
+ spin_lock_irqsave(&fsl_queue->queue_lock, flags);
+ list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
+ spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
+}
+
+static void fsl_qdma_issue_pending(struct dma_chan *chan)
+{
+ unsigned long flags;
+ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+ struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+
+ spin_lock_irqsave(&fsl_queue->queue_lock, flags);
+ spin_lock(&fsl_chan->vchan.lock);
+ if (vchan_issue_pending(&fsl_chan->vchan))
+ fsl_qdma_enqueue_desc(fsl_chan);
+ spin_unlock(&fsl_chan->vchan.lock);
+ spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
+}
+
+static void fsl_qdma_synchronize(struct dma_chan *chan)
+{
+ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+
+ vchan_synchronize(&fsl_chan->vchan);
+}
+
+static int fsl_qdma_terminate_all(struct dma_chan *chan)
+{
+ LIST_HEAD(head);
+ unsigned long flags;
+ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+ return 0;
+}
+
+static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ int ret;
+ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+ struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
+ struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+
+ if (fsl_queue->comp_pool && fsl_queue->desc_pool)
+ return fsl_qdma->desc_allocated;
+
+ INIT_LIST_HEAD(&fsl_queue->comp_free);
+
+ /*
+ * The dma pool for queue command buffer
+ */
+ fsl_queue->comp_pool =
+ dma_pool_create("comp_pool",
+ chan->device->dev,
+ FSL_QDMA_COMMAND_BUFFER_SIZE,
+ 64, 0);
+ if (!fsl_queue->comp_pool)
+ return -ENOMEM;
+
+ /*
+ * The dma pool for Descriptor(SD/DD) buffer
+ */
+ fsl_queue->desc_pool =
+ dma_pool_create("desc_pool",
+ chan->device->dev,
+ FSL_QDMA_DESCRIPTOR_BUFFER_SIZE,
+ 32, 0);
+ if (!fsl_queue->desc_pool)
+ goto err_desc_pool;
+
+ ret = fsl_qdma_pre_request_enqueue_desc(fsl_queue);
+ if (ret) {
+ dev_err(chan->device->dev,
+ "failed to alloc dma buffer for S/G descriptor\n");
+ goto err_mem;
+ }
+
+ fsl_qdma->desc_allocated++;
+ return fsl_qdma->desc_allocated;
+
+err_mem:
+ dma_pool_destroy(fsl_queue->desc_pool);
+err_desc_pool:
+ dma_pool_destroy(fsl_queue->comp_pool);
+ return -ENOMEM;
+}
+
+static int fsl_qdma_probe(struct platform_device *pdev)
+{
+ int ret, i;
+ int blk_num, blk_off;
+ u32 len, chans, queues;
+ struct resource *res;
+ struct fsl_qdma_chan *fsl_chan;
+ struct fsl_qdma_engine *fsl_qdma;
+ struct device_node *np = pdev->dev.of_node;
+
+ ret = of_property_read_u32(np, "dma-channels", &chans);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get dma-channels.\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "block-offset", &blk_off);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get block-offset.\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "block-number", &blk_num);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get block-number.\n");
+ return ret;
+ }
+
+ blk_num = min_t(int, blk_num, num_online_cpus());
+
+ len = sizeof(*fsl_qdma);
+ fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ if (!fsl_qdma)
+ return -ENOMEM;
+
+ len = sizeof(*fsl_chan) * chans;
+ fsl_qdma->chans = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ if (!fsl_qdma->chans)
+ return -ENOMEM;
+
+ len = sizeof(struct fsl_qdma_queue *) * blk_num;
+ fsl_qdma->status = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ if (!fsl_qdma->status)
+ return -ENOMEM;
+
+ len = sizeof(int) * blk_num;
+ fsl_qdma->queue_irq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ if (!fsl_qdma->queue_irq)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(np, "fsl,dma-queues", &queues);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get queues.\n");
+ return ret;
+ }
+
+ fsl_qdma->desc_allocated = 0;
+ fsl_qdma->n_chans = chans;
+ fsl_qdma->n_queues = queues;
+ fsl_qdma->block_number = blk_num;
+ fsl_qdma->block_offset = blk_off;
+
+ mutex_init(&fsl_qdma->fsl_qdma_mutex);
+
+ for (i = 0; i < fsl_qdma->block_number; i++) {
+ fsl_qdma->status[i] = fsl_qdma_prep_status_queue(pdev);
+ if (!fsl_qdma->status[i])
+ return -ENOMEM;
+ }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fsl_qdma->ctrl_base))
+ return PTR_ERR(fsl_qdma->ctrl_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fsl_qdma->status_base))
+ return PTR_ERR(fsl_qdma->status_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fsl_qdma->block_base))
+ return PTR_ERR(fsl_qdma->block_base);
+ fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma);
+ if (!fsl_qdma->queue)
+ return -ENOMEM;
+
+ ret = fsl_qdma_irq_init(pdev, fsl_qdma);
+ if (ret)
+ return ret;
+
+ fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
+ fsl_qdma->feature = of_property_read_bool(np, "big-endian");
+ INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
+
+ for (i = 0; i < fsl_qdma->n_chans; i++) {
+ struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
+
+ fsl_chan->qdma = fsl_qdma;
+ fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
+ fsl_qdma->block_number);
+ fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
+ vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
+ }
+
+ dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
+
+ fsl_qdma->dma_dev.dev = &pdev->dev;
+ fsl_qdma->dma_dev.device_free_chan_resources =
+ fsl_qdma_free_chan_resources;
+ fsl_qdma->dma_dev.device_alloc_chan_resources =
+ fsl_qdma_alloc_chan_resources;
+ fsl_qdma->dma_dev.device_tx_status = dma_cookie_status;
+ fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
+ fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
+ fsl_qdma->dma_dev.device_synchronize = fsl_qdma_synchronize;
+ fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all;
+
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+
+ platform_set_drvdata(pdev, fsl_qdma);
+
+ ret = dma_async_device_register(&fsl_qdma->dma_dev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Can't register NXP Layerscape qDMA engine.\n");
+ return ret;
+ }
+
+ ret = fsl_qdma_reg_init(fsl_qdma);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
+{
+ struct fsl_qdma_chan *chan, *_chan;
+
+ list_for_each_entry_safe(chan, _chan,
+ &dmadev->channels, vchan.chan.device_node) {
+ list_del(&chan->vchan.chan.device_node);
+ tasklet_kill(&chan->vchan.task);
+ }
+}
+
+static int fsl_qdma_remove(struct platform_device *pdev)
+{
+ int i;
+ struct fsl_qdma_queue *status;
+ struct device_node *np = pdev->dev.of_node;
+ struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
+
+ fsl_qdma_irq_exit(pdev, fsl_qdma);
+ fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
+ of_dma_controller_free(np);
+ dma_async_device_unregister(&fsl_qdma->dma_dev);
+
+ for (i = 0; i < fsl_qdma->block_number; i++) {
+ status = fsl_qdma->status[i];
+ dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
+ status->n_cq, status->cq, status->bus_addr);
+ }
+ return 0;
+}
+
+static const struct of_device_id fsl_qdma_dt_ids[] = {
+ { .compatible = "fsl,ls1021a-qdma", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);
+
+static struct platform_driver fsl_qdma_driver = {
+ .driver = {
+ .name = "fsl-qdma",
+ .of_match_table = fsl_qdma_dt_ids,
+ },
+ .probe = fsl_qdma_probe,
+ .remove = fsl_qdma_remove,
+};
+
+module_platform_driver(fsl_qdma_driver);
+
+MODULE_ALIAS("platform:fsl-qdma");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("NXP Layerscape qDMA engine driver");
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 9d360a3fbae3..1e38e6b94006 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -53,42 +53,42 @@ static const char msg_ld_oom[] = "No free memory for link descriptor";
static void set_sr(struct fsldma_chan *chan, u32 val)
{
- DMA_OUT(chan, &chan->regs->sr, val, 32);
+ FSL_DMA_OUT(chan, &chan->regs->sr, val, 32);
}
static u32 get_sr(struct fsldma_chan *chan)
{
- return DMA_IN(chan, &chan->regs->sr, 32);
+ return FSL_DMA_IN(chan, &chan->regs->sr, 32);
}
static void set_mr(struct fsldma_chan *chan, u32 val)
{
- DMA_OUT(chan, &chan->regs->mr, val, 32);
+ FSL_DMA_OUT(chan, &chan->regs->mr, val, 32);
}
static u32 get_mr(struct fsldma_chan *chan)
{
- return DMA_IN(chan, &chan->regs->mr, 32);
+ return FSL_DMA_IN(chan, &chan->regs->mr, 32);
}
static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
- DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
+ FSL_DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}
static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
- return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
+ return FSL_DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}
static void set_bcr(struct fsldma_chan *chan, u32 val)
{
- DMA_OUT(chan, &chan->regs->bcr, val, 32);
+ FSL_DMA_OUT(chan, &chan->regs->bcr, val, 32);
}
static u32 get_bcr(struct fsldma_chan *chan)
{
- return DMA_IN(chan, &chan->regs->bcr, 32);
+ return FSL_DMA_IN(chan, &chan->regs->bcr, 32);
}
/*
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 4787d485dd76..a9b12f82b5c3 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -196,39 +196,67 @@ struct fsldma_chan {
#define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node)
#define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
-#ifndef __powerpc64__
-static u64 in_be64(const u64 __iomem *addr)
+#ifdef CONFIG_PPC
+#define fsl_ioread32(p) in_le32(p)
+#define fsl_ioread32be(p) in_be32(p)
+#define fsl_iowrite32(v, p) out_le32(p, v)
+#define fsl_iowrite32be(v, p) out_be32(p, v)
+
+#ifdef __powerpc64__
+#define fsl_ioread64(p) in_le64(p)
+#define fsl_ioread64be(p) in_be64(p)
+#define fsl_iowrite64(v, p) out_le64(p, v)
+#define fsl_iowrite64be(v, p) out_be64(p, v)
+#else
+static u64 fsl_ioread64(const u64 __iomem *addr)
{
- return ((u64)in_be32((u32 __iomem *)addr) << 32) |
- (in_be32((u32 __iomem *)addr + 1));
+ u32 val_lo = in_le32((u32 __iomem *)addr);
+ u32 val_hi = in_le32((u32 __iomem *)addr + 1);
+
+ return ((u64)val_hi << 32) | val_lo;
}
-static void out_be64(u64 __iomem *addr, u64 val)
+static void fsl_iowrite64(u64 val, u64 __iomem *addr)
{
- out_be32((u32 __iomem *)addr, val >> 32);
- out_be32((u32 __iomem *)addr + 1, (u32)val);
+ out_le32((u32 __iomem *)addr + 1, val >> 32);
+ out_le32((u32 __iomem *)addr, (u32)val);
}
-/* There is no asm instructions for 64 bits reverse loads and stores */
-static u64 in_le64(const u64 __iomem *addr)
+static u64 fsl_ioread64be(const u64 __iomem *addr)
{
- return ((u64)in_le32((u32 __iomem *)addr + 1) << 32) |
- (in_le32((u32 __iomem *)addr));
+ u32 val_hi = in_be32((u32 __iomem *)addr);
+ u32 val_lo = in_be32((u32 __iomem *)addr + 1);
+
+ return ((u64)val_hi << 32) | val_lo;
}
-static void out_le64(u64 __iomem *addr, u64 val)
+static void fsl_iowrite64be(u64 val, u64 __iomem *addr)
{
- out_le32((u32 __iomem *)addr + 1, val >> 32);
- out_le32((u32 __iomem *)addr, (u32)val);
+ out_be32((u32 __iomem *)addr, val >> 32);
+ out_be32((u32 __iomem *)addr + 1, (u32)val);
}
#endif
+#endif
-#define DMA_IN(fsl_chan, addr, width) \
- (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \
- in_be##width(addr) : in_le##width(addr))
-#define DMA_OUT(fsl_chan, addr, val, width) \
- (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \
- out_be##width(addr, val) : out_le##width(addr, val))
+#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
+#define fsl_ioread32(p) ioread32(p)
+#define fsl_ioread32be(p) ioread32be(p)
+#define fsl_iowrite32(v, p) iowrite32(v, p)
+#define fsl_iowrite32be(v, p) iowrite32be(v, p)
+#define fsl_ioread64(p) ioread64(p)
+#define fsl_ioread64be(p) ioread64be(p)
+#define fsl_iowrite64(v, p) iowrite64(v, p)
+#define fsl_iowrite64be(v, p) iowrite64be(v, p)
+#endif
+
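+/*
+ * Endianness-aware register accessors: pick the big- or little-endian
+ * helper based on the controller's FSL_DMA_BIG_ENDIAN feature flag.
+ */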
+#define FSL_DMA_IN(fsl_dma, addr, width) \
+ (((fsl_dma)->feature & FSL_DMA_BIG_ENDIAN) ? \
+ fsl_ioread##width##be(addr) : fsl_ioread##width(addr))
+
+#define FSL_DMA_OUT(fsl_dma, addr, val, width) \
+ (((fsl_dma)->feature & FSL_DMA_BIG_ENDIAN) ? \
+ fsl_iowrite##width##be(val, addr) : \
+ fsl_iowrite##width(val, addr))
#define DMA_TO_CPU(fsl_chan, d, width) \
(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 4a09af3cd546..00a089e24150 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -278,14 +278,14 @@ static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
/*
* imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
*/
-static inline int imxdma_sg_next(struct imxdma_desc *d)
+static inline void imxdma_sg_next(struct imxdma_desc *d)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
struct imxdma_engine *imxdma = imxdmac->imxdma;
struct scatterlist *sg = d->sg;
- unsigned long now;
+ size_t now;
- now = min(d->len, sg_dma_len(sg));
+ now = min_t(size_t, d->len, sg_dma_len(sg));
if (d->len != IMX_DMA_LENGTH_LOOP)
d->len -= now;
@@ -303,8 +303,6 @@ static inline int imxdma_sg_next(struct imxdma_desc *d)
imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));
-
- return now;
}
static void imxdma_enable_hw(struct imxdma_desc *d)
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 86708fb9bda1..5f3c1378b90e 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -377,6 +377,7 @@ struct sdma_channel {
unsigned long watermark_level;
u32 shp_addr, per_addr;
enum dma_status status;
+ bool context_loaded;
struct imx_dma_data data;
struct work_struct terminate_worker;
};
@@ -440,6 +441,8 @@ struct sdma_engine {
unsigned int irq;
dma_addr_t bd0_phys;
struct sdma_buffer_descriptor *bd0;
+ /* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0 */
+ bool clk_ratio;
};
static int sdma_config_write(struct dma_chan *chan,
@@ -662,8 +665,11 @@ static int sdma_run_channel0(struct sdma_engine *sdma)
dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
/* Set bits of CONFIG register with dynamic context switching */
- if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
- writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
+ reg = readl(sdma->regs + SDMA_H_CONFIG);
+ if ((reg & SDMA_H_CONFIG_CSM) == 0) {
+ reg |= SDMA_H_CONFIG_CSM;
+ writel_relaxed(reg, sdma->regs + SDMA_H_CONFIG);
+ }
return ret;
}
@@ -677,7 +683,7 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
int ret;
unsigned long flags;
- buf_virt = dma_alloc_coherent(NULL, size, &buf_phys, GFP_KERNEL);
+ buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys, GFP_KERNEL);
if (!buf_virt) {
return -ENOMEM;
}
@@ -696,7 +702,7 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
- dma_free_coherent(NULL, size, buf_virt, buf_phys);
+ dma_free_coherent(sdma->dev, size, buf_virt, buf_phys);
return ret;
}
@@ -970,6 +976,9 @@ static int sdma_load_context(struct sdma_channel *sdmac)
int ret;
unsigned long flags;
+ if (sdmac->context_loaded)
+ return 0;
+
if (sdmac->direction == DMA_DEV_TO_MEM)
load_address = sdmac->pc_from_device;
else if (sdmac->direction == DMA_DEV_TO_DEV)
@@ -1012,6 +1021,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
+ sdmac->context_loaded = true;
+
return ret;
}
@@ -1051,6 +1062,7 @@ static void sdma_channel_terminate_work(struct work_struct *work)
sdmac->desc = NULL;
spin_unlock_irqrestore(&sdmac->vc.lock, flags);
vchan_dma_desc_free_list(&sdmac->vc, &head);
+ sdmac->context_loaded = false;
}
static int sdma_disable_channel_async(struct dma_chan *chan)
@@ -1182,8 +1194,8 @@ static int sdma_request_channel0(struct sdma_engine *sdma)
{
int ret = -EBUSY;
- sdma->bd0 = dma_alloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
- GFP_NOWAIT);
+ sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys,
+ GFP_NOWAIT);
if (!sdma->bd0) {
ret = -ENOMEM;
goto out;
@@ -1205,8 +1217,8 @@ static int sdma_alloc_bd(struct sdma_desc *desc)
u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
int ret = 0;
- desc->bd = dma_alloc_coherent(NULL, bd_size, &desc->bd_phys,
- GFP_NOWAIT);
+ desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size,
+ &desc->bd_phys, GFP_NOWAIT);
if (!desc->bd) {
ret = -ENOMEM;
goto out;
@@ -1219,7 +1231,8 @@ static void sdma_free_bd(struct sdma_desc *desc)
{
u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
- dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys);
+ dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd,
+ desc->bd_phys);
}
static void sdma_desc_free(struct virt_dma_desc *vd)
@@ -1839,10 +1852,13 @@ static int sdma_init(struct sdma_engine *sdma)
if (ret)
goto disable_clk_ipg;
+ if (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg))
+ sdma->clk_ratio = 1;
+
/* Be sure SDMA has not started yet */
writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
- sdma->channel_control = dma_alloc_coherent(NULL,
+ sdma->channel_control = dma_alloc_coherent(sdma->dev,
MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
sizeof(struct sdma_context_data),
&ccb_phys, GFP_KERNEL);
@@ -1879,8 +1895,10 @@ static int sdma_init(struct sdma_engine *sdma)
writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
/* Set bits of CONFIG register but with static context switching */
- /* FIXME: Check whether to set ACR bit depending on clock ratios */
- writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
+ if (sdma->clk_ratio)
+ writel_relaxed(SDMA_H_CONFIG_ACR, sdma->regs + SDMA_H_CONFIG);
+ else
+ writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
@@ -1903,11 +1921,16 @@ disable_clk_ipg:
static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct sdma_engine *sdma = sdmac->sdma;
struct imx_dma_data *data = fn_param;
if (!imx_dma_is_general_purpose(chan))
return false;
+ /* return false if it's not the right device */
+ if (sdma->dev->of_node != data->of_node)
+ return false;
+
sdmac->data = *data;
chan->private = &sdmac->data;
@@ -1935,6 +1958,7 @@ static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
* be set to sdmac->event_id1.
*/
data.dma_request2 = 0;
+ data.of_node = ofdma->of_node;
return dma_request_channel(mask, sdma_filter_fn, &data);
}
@@ -2097,6 +2121,7 @@ static int sdma_probe(struct platform_device *pdev)
sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
sdma->dma_device.device_issue_pending = sdma_issue_pending;
sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
+ sdma->dma_device.copy_align = 2;
dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
platform_set_drvdata(pdev, sdma);
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 23fb2fa04000..f373a139e0c3 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -372,6 +372,7 @@ struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+ struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
struct ioat_ring_ent **ring;
int total_descs = 1 << order;
int i, chunks;
@@ -437,6 +438,17 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
}
ring[i]->hw->next = ring[0]->txd.phys;
+ /* setup descriptor pre-fetching for v3.4 */
+ if (ioat_dma->cap & IOAT_CAP_DPS) {
+ u16 drsctl = IOAT_CHAN_DRSZ_2MB | IOAT_CHAN_DRS_EN;
+
+ if (chunks == 1)
+ drsctl |= IOAT_CHAN_DRS_AUTOWRAP;
+
+ writew(drsctl, ioat_chan->reg_base + IOAT_CHAN_DRSCTL_OFFSET);
+
+ }
+
return ring;
}
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 1ab42ec2b7ff..aaafd0e882b5 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -27,7 +27,7 @@
#include "registers.h"
#include "hw.h"
-#define IOAT_DMA_VERSION "4.00"
+#define IOAT_DMA_VERSION "5.00"
#define IOAT_DMA_DCA_ANY_CPU ~0
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index abcc51b343ce..781c94de8e81 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -66,11 +66,14 @@
#define PCI_DEVICE_ID_INTEL_IOAT_SKX 0x2021
+#define PCI_DEVICE_ID_INTEL_IOAT_ICX 0x0b00
+
#define IOAT_VER_1_2 0x12 /* Version 1.2 */
#define IOAT_VER_2_0 0x20 /* Version 2.0 */
#define IOAT_VER_3_0 0x30 /* Version 3.0 */
#define IOAT_VER_3_2 0x32 /* Version 3.2 */
#define IOAT_VER_3_3 0x33 /* Version 3.3 */
+#define IOAT_VER_3_4 0x34 /* Version 3.4 */
int system_has_dca_enabled(struct pci_dev *pdev);
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 2d810dfcdc48..d41dc9a9ff68 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -119,6 +119,9 @@ static const struct pci_device_id ioat_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },
+ /* I/OAT v3.4 platforms */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_ICX) },
+
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
@@ -135,10 +138,10 @@ static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);
static int ioat_dca_enabled = 1;
module_param(ioat_dca_enabled, int, 0644);
MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
-int ioat_pending_level = 4;
+int ioat_pending_level = 7;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
- "high-water mark for pushing ioat descriptors (default: 4)");
+ "high-water mark for pushing ioat descriptors (default: 7)");
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
sizeof(ioat_interrupt_style), 0644);
@@ -635,6 +638,11 @@ static void ioat_free_chan_resources(struct dma_chan *c)
ioat_stop(ioat_chan);
ioat_reset_hw(ioat_chan);
+ /* Put LTR to idle */
+ if (ioat_dma->version >= IOAT_VER_3_4)
+ writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
+ ioat_chan->reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET);
+
spin_lock_bh(&ioat_chan->cleanup_lock);
spin_lock_bh(&ioat_chan->prep_lock);
descs = ioat_ring_space(ioat_chan);
@@ -724,6 +732,28 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
spin_unlock_bh(&ioat_chan->prep_lock);
spin_unlock_bh(&ioat_chan->cleanup_lock);
+ /* Setting up LTR values for 3.4 or later */
+ if (ioat_chan->ioat_dma->version >= IOAT_VER_3_4) {
+ u32 lat_val;
+
+ lat_val = IOAT_CHAN_LTR_ACTIVE_SNVAL |
+ IOAT_CHAN_LTR_ACTIVE_SNLATSCALE |
+ IOAT_CHAN_LTR_ACTIVE_SNREQMNT;
+ writel(lat_val, ioat_chan->reg_base +
+ IOAT_CHAN_LTR_ACTIVE_OFFSET);
+
+ lat_val = IOAT_CHAN_LTR_IDLE_SNVAL |
+ IOAT_CHAN_LTR_IDLE_SNLATSCALE |
+ IOAT_CHAN_LTR_IDLE_SNREQMNT;
+ writel(lat_val, ioat_chan->reg_base +
+ IOAT_CHAN_LTR_IDLE_OFFSET);
+
+ /* Select to active */
+ writeb(IOAT_CHAN_LTR_SWSEL_ACTIVE,
+ ioat_chan->reg_base +
+ IOAT_CHAN_LTR_SWSEL_OFFSET);
+ }
+
ioat_start_null_desc(ioat_chan);
/* check that we got off the ground */
@@ -1185,6 +1215,10 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
if (err)
return err;
+ if (ioat_dma->cap & IOAT_CAP_DPS)
+ writeb(ioat_pending_level + 1,
+ ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET);
+
return 0;
}
@@ -1350,6 +1384,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, device);
device->version = readb(device->reg_base + IOAT_VER_OFFSET);
+ if (device->version >= IOAT_VER_3_4)
+ ioat_dca_enabled = 0;
if (device->version >= IOAT_VER_3_0) {
if (is_skx_ioat(pdev))
device->version = IOAT_VER_3_2;
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 2f3bbc88ff2a..99c1c24d465d 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -84,6 +84,9 @@
#define IOAT_CAP_PQ 0x00000200
#define IOAT_CAP_DWBES 0x00002000
#define IOAT_CAP_RAID16SS 0x00020000
+#define IOAT_CAP_DPS 0x00800000
+
+#define IOAT_PREFETCH_LIMIT_OFFSET 0x4C /* CHWPREFLMT */
#define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */
@@ -243,4 +246,25 @@
#define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */
+#define IOAT_CHAN_DRSCTL_OFFSET 0xB6
+#define IOAT_CHAN_DRSZ_4KB 0x0000
+#define IOAT_CHAN_DRSZ_8KB 0x0001
+#define IOAT_CHAN_DRSZ_2MB 0x0009
+#define IOAT_CHAN_DRS_EN 0x0100
+#define IOAT_CHAN_DRS_AUTOWRAP 0x0200
+
+#define IOAT_CHAN_LTR_SWSEL_OFFSET 0xBC
+#define IOAT_CHAN_LTR_SWSEL_ACTIVE 0x0
+#define IOAT_CHAN_LTR_SWSEL_IDLE 0x1
+
+#define IOAT_CHAN_LTR_ACTIVE_OFFSET 0xC0
+#define IOAT_CHAN_LTR_ACTIVE_SNVAL 0x0000 /* 0 us */
+#define IOAT_CHAN_LTR_ACTIVE_SNLATSCALE 0x0800 /* 1us scale */
+#define IOAT_CHAN_LTR_ACTIVE_SNREQMNT 0x8000 /* snoop req enable */
+
+#define IOAT_CHAN_LTR_IDLE_OFFSET 0xC4
+#define IOAT_CHAN_LTR_IDLE_SNVAL 0x0258 /* 600 us */
+#define IOAT_CHAN_LTR_IDLE_SNLATSCALE 0x0800 /* 1us scale */
+#define IOAT_CHAN_LTR_IDLE_SNREQMNT 0x8000 /* snoop req enable */
+
#endif /* _IOAT_REGISTERS_H_ */
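The init.c hunks above program these new v3.4 LTR registers when a channel is allocated and select the idle value again when the channel is freed. A minimal sketch of that sequence, assuming a mapped per-channel register base (the helper name is made up, the field values are the ones defined here):

/* Sketch of the I/OAT v3.4 LTR programming done in init.c above;
 * "reg_base" is the channel's mapped MMIO base, helper name is illustrative. */
static void ioat34_ltr_program(void __iomem *reg_base)
{
	u32 lat_val;

	/* Active LTR: 0 us value, 1 us scale, snoop requirement enabled */
	lat_val = IOAT_CHAN_LTR_ACTIVE_SNVAL |
		  IOAT_CHAN_LTR_ACTIVE_SNLATSCALE |
		  IOAT_CHAN_LTR_ACTIVE_SNREQMNT;
	writel(lat_val, reg_base + IOAT_CHAN_LTR_ACTIVE_OFFSET);

	/* Idle LTR: 600 us value, same scale and snoop requirement */
	lat_val = IOAT_CHAN_LTR_IDLE_SNVAL |
		  IOAT_CHAN_LTR_IDLE_SNLATSCALE |
		  IOAT_CHAN_LTR_IDLE_SNREQMNT;
	writel(lat_val, reg_base + IOAT_CHAN_LTR_IDLE_OFFSET);

	/* Select the active value; free_chan_resources selects idle again */
	writeb(IOAT_CHAN_LTR_SWSEL_ACTIVE,
	       reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET);
}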
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index fdec2b6cfbb0..5737d92eaeeb 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -52,8 +52,6 @@
#define CX_SRC 0x814
#define CX_DST 0x818
#define CX_CFG 0x81c
-#define AXI_CFG 0x820
-#define AXI_CFG_DEFAULT 0x201201
#define CX_LLI_CHAIN_EN 0x2
#define CX_CFG_EN 0x1
@@ -113,9 +111,18 @@ struct k3_dma_dev {
struct dma_pool *pool;
u32 dma_channels;
u32 dma_requests;
+ u32 dma_channel_mask;
unsigned int irq;
};
+
+#define K3_FLAG_NOCLK BIT(1)
+
+struct k3dma_soc_data {
+ unsigned long flags;
+};
+
+
#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
static int k3_dma_config_write(struct dma_chan *chan,
@@ -161,7 +168,6 @@ static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
writel_relaxed(hw->count, phy->base + CX_CNT0);
writel_relaxed(hw->saddr, phy->base + CX_SRC);
writel_relaxed(hw->daddr, phy->base + CX_DST);
- writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG);
writel_relaxed(hw->config, phy->base + CX_CFG);
}
@@ -314,6 +320,9 @@ static void k3_dma_tasklet(unsigned long arg)
/* check new channel request in d->chan_pending */
spin_lock_irq(&d->lock);
for (pch = 0; pch < d->dma_channels; pch++) {
+ if (!(d->dma_channel_mask & (1 << pch)))
+ continue;
+
p = &d->phy[pch];
if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
@@ -331,6 +340,9 @@ static void k3_dma_tasklet(unsigned long arg)
spin_unlock_irq(&d->lock);
for (pch = 0; pch < d->dma_channels; pch++) {
+ if (!(d->dma_channel_mask & (1 << pch)))
+ continue;
+
if (pch_alloc & (1 << pch)) {
p = &d->phy[pch];
c = p->vchan;
@@ -790,8 +802,21 @@ static int k3_dma_transfer_resume(struct dma_chan *chan)
return 0;
}
+static const struct k3dma_soc_data k3_v1_dma_data = {
+ .flags = 0,
+};
+
+static const struct k3dma_soc_data asp_v1_dma_data = {
+ .flags = K3_FLAG_NOCLK,
+};
+
static const struct of_device_id k3_pdma_dt_ids[] = {
- { .compatible = "hisilicon,k3-dma-1.0", },
+ { .compatible = "hisilicon,k3-dma-1.0",
+ .data = &k3_v1_dma_data
+ },
+ { .compatible = "hisilicon,hisi-pcm-asp-dma-1.0",
+ .data = &asp_v1_dma_data
+ },
{}
};
MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);
@@ -810,6 +835,7 @@ static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
static int k3_dma_probe(struct platform_device *op)
{
+ const struct k3dma_soc_data *soc_data;
struct k3_dma_dev *d;
const struct of_device_id *of_id;
struct resource *iores;
@@ -823,6 +849,10 @@ static int k3_dma_probe(struct platform_device *op)
if (!d)
return -ENOMEM;
+ soc_data = device_get_match_data(&op->dev);
+ if (!soc_data)
+ return -EINVAL;
+
d->base = devm_ioremap_resource(&op->dev, iores);
if (IS_ERR(d->base))
return PTR_ERR(d->base);
@@ -833,12 +863,21 @@ static int k3_dma_probe(struct platform_device *op)
"dma-channels", &d->dma_channels);
of_property_read_u32((&op->dev)->of_node,
"dma-requests", &d->dma_requests);
+ ret = of_property_read_u32((&op->dev)->of_node,
+ "dma-channel-mask", &d->dma_channel_mask);
+ if (ret) {
+ dev_warn(&op->dev,
+ "dma-channel-mask doesn't exist, considering all as available.\n");
+ d->dma_channel_mask = (u32)~0UL;
+ }
}
- d->clk = devm_clk_get(&op->dev, NULL);
- if (IS_ERR(d->clk)) {
- dev_err(&op->dev, "no dma clk\n");
- return PTR_ERR(d->clk);
+ if (!(soc_data->flags & K3_FLAG_NOCLK)) {
+ d->clk = devm_clk_get(&op->dev, NULL);
+ if (IS_ERR(d->clk)) {
+ dev_err(&op->dev, "no dma clk\n");
+ return PTR_ERR(d->clk);
+ }
}
irq = platform_get_irq(op, 0);
@@ -862,8 +901,12 @@ static int k3_dma_probe(struct platform_device *op)
return -ENOMEM;
for (i = 0; i < d->dma_channels; i++) {
- struct k3_dma_phy *p = &d->phy[i];
+ struct k3_dma_phy *p;
+
+ if (!(d->dma_channel_mask & BIT(i)))
+ continue;
+ p = &d->phy[i];
p->idx = i;
p->base = d->base + i * 0x40;
}
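The new compatible here is distinguished purely through OF match data; probe then branches on a per-SoC flag instead of growing a second code path. A condensed sketch of that pattern, with hypothetical names throughout (my_soc_data, my_probe, the "vendor,..." strings), not the k3dma driver itself:

/* Sketch of the of_device_id match-data pattern used above. */
struct my_soc_data {
	unsigned long flags;
};

#define MY_FLAG_NOCLK	BIT(1)

static const struct my_soc_data my_v1_data  = { .flags = 0 };
static const struct my_soc_data my_asp_data = { .flags = MY_FLAG_NOCLK };

static const struct of_device_id my_dt_ids[] = {
	{ .compatible = "vendor,ip-v1",  .data = &my_v1_data },
	{ .compatible = "vendor,ip-asp", .data = &my_asp_data },
	{ }
};

static int my_probe(struct platform_device *pdev)
{
	const struct my_soc_data *soc = device_get_match_data(&pdev->dev);
	struct clk *clk;

	if (!soc)
		return -EINVAL;		/* bound without match data */

	if (!(soc->flags & MY_FLAG_NOCLK)) {
		/* only variants with a clock look one up */
		clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);
	}
	return 0;
}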
diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c
index 5de1b07eddff..7de54b2fafdb 100644
--- a/drivers/dma/mcf-edma.c
+++ b/drivers/dma/mcf-edma.c
@@ -214,6 +214,7 @@ static int mcf_edma_probe(struct platform_device *pdev)
mcf_chan->edma = mcf_edma;
mcf_chan->slave_id = i;
mcf_chan->idle = true;
+ mcf_chan->dma_dir = DMA_NONE;
mcf_chan->vchan.desc_free = fsl_edma_free_desc;
vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
iowrite32(0x0, &regs->tcd[i].csr);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 7f595355fb79..65af2e7fcb2c 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1059,6 +1059,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
mv_chan->op_in_desc = XOR_MODE_IN_DESC;
dma_dev = &mv_chan->dmadev;
+ dma_dev->dev = &pdev->dev;
mv_chan->xordev = xordev;
/*
@@ -1091,7 +1092,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
dma_dev->device_tx_status = mv_xor_status;
dma_dev->device_issue_pending = mv_xor_issue_pending;
- dma_dev->dev = &pdev->dev;
/* set prep routines based on capability */
if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
@@ -1153,7 +1153,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
- dma_async_device_register(dma_dev);
+ ret = dma_async_device_register(dma_dev);
+ if (ret)
+ goto err_free_irq;
+
return mv_chan;
err_free_irq:
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index cff1b143fff5..eec79fdf27a5 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2267,7 +2267,6 @@ static int pl330_terminate_all(struct dma_chan *chan)
struct dma_pl330_desc *desc;
unsigned long flags;
struct pl330_dmac *pl330 = pch->dmac;
- LIST_HEAD(list);
bool power_down = false;
pm_runtime_get_sync(pl330->ddma.dev);
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 1617715aa6e0..cb860cb53c27 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -636,8 +636,8 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE);
 /* allocate enough room to accommodate the number of entries */
- async_desc = kzalloc(sizeof(*async_desc) +
- (num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT);
+ async_desc = kzalloc(struct_size(async_desc, desc, num_alloc),
+ GFP_NOWAIT);
if (!async_desc)
goto err_out;
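This hunk, and the later st_fdma, tegra20/tegra210 and timb_dma ones, are the same mechanical conversion: an open-coded "sizeof(*p) + n * sizeof(elem)" allocation for a structure ending in a flexible array becomes struct_size(), which saturates on overflow instead of wrapping. In outline, with illustrative struct names:

/* Illustration of the struct_size() conversion pattern, not driver code. */
#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_elem {
	u32 val;
};

struct demo {
	unsigned int count;
	struct demo_elem elems[];	/* trailing flexible array */
};

static struct demo *demo_alloc(unsigned int num)
{
	struct demo *p;

	/* before: p = kzalloc(sizeof(*p) + num * sizeof(*p->elems), ...); */
	p = kzalloc(struct_size(p, elems, num), GFP_NOWAIT);
	if (p)
		p->count = num;
	return p;
}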
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 43d4b00b8138..411f91fde734 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -138,24 +138,25 @@ static void hidma_process_completed(struct hidma_chan *mchan)
desc = &mdesc->desc;
last_cookie = desc->cookie;
+ llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
+
spin_lock_irqsave(&mchan->lock, irqflags);
+ if (llstat == DMA_COMPLETE) {
+ mchan->last_success = last_cookie;
+ result.result = DMA_TRANS_NOERROR;
+ } else {
+ result.result = DMA_TRANS_ABORTED;
+ }
+
dma_cookie_complete(desc);
spin_unlock_irqrestore(&mchan->lock, irqflags);
- llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
dmaengine_desc_get_callback(desc, &cb);
dma_run_dependencies(desc);
spin_lock_irqsave(&mchan->lock, irqflags);
list_move(&mdesc->node, &mchan->free);
-
- if (llstat == DMA_COMPLETE) {
- mchan->last_success = last_cookie;
- result.result = DMA_TRANS_NOERROR;
- } else
- result.result = DMA_TRANS_ABORTED;
-
spin_unlock_irqrestore(&mchan->lock, irqflags);
dmaengine_desc_callback_invoke(&cb, &result);
@@ -415,6 +416,7 @@ hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
if (!mdesc)
return NULL;
+ mdesc->desc.flags = flags;
hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
src, dest, len, flags,
HIDMA_TRE_MEMCPY);
@@ -447,6 +449,7 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
if (!mdesc)
return NULL;
+ mdesc->desc.flags = flags;
hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
value, dest, len, flags,
HIDMA_TRE_MEMSET);
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index d64edeb6771a..681de12f4c67 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -423,9 +423,8 @@ static int __init hidma_mgmt_init(void)
hidma_mgmt_of_populate_channels(child);
}
#endif
- platform_driver_register(&hidma_mgmt_driver);
+ return platform_driver_register(&hidma_mgmt_driver);
- return 0;
}
module_init(hidma_mgmt_init);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 784d5f1a473b..3fae23768b47 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -705,7 +705,6 @@ static int sa11x0_dma_device_pause(struct dma_chan *chan)
struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
struct sa11x0_dma_phy *p;
- LIST_HEAD(head);
unsigned long flags;
dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
@@ -732,7 +731,6 @@ static int sa11x0_dma_device_resume(struct dma_chan *chan)
struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
struct sa11x0_dma_phy *p;
- LIST_HEAD(head);
unsigned long flags;
dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 7f7184c3cf95..59403f6d008a 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -694,6 +694,8 @@ static int usb_dmac_runtime_resume(struct device *dev)
#endif /* CONFIG_PM */
static const struct dev_pm_ops usb_dmac_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume,
NULL)
};
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index e2f016700fcc..48431e2da987 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -580,15 +580,7 @@ static irqreturn_t dma_irq_handle(int irq, void *dev_id)
static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
{
- struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
- int ret;
-
- ret = pm_runtime_get_sync(chan->device->dev);
- if (ret < 0)
- return ret;
-
- schan->dev_id = SPRD_DMA_SOFTWARE_UID;
- return 0;
+ return pm_runtime_get_sync(chan->device->dev);
}
static void sprd_dma_free_chan_resources(struct dma_chan *chan)
@@ -1021,13 +1013,10 @@ static void sprd_dma_free_desc(struct virt_dma_desc *vd)
static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param)
{
struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
- struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
- u32 req = *(u32 *)param;
+ u32 slave_id = *(u32 *)param;
- if (req < sdev->total_chns)
- return req == schan->chn_num + 1;
- else
- return false;
+ schan->dev_id = slave_id;
+ return true;
}
static int sprd_dma_probe(struct platform_device *pdev)
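With this change the filter no longer matches a fixed channel number; it accepts any free channel and records the requested slave id in it. A hedged sketch of how a consumer would then pass the slave id as the filter parameter (the wrapper function and the assumption that the filter symbol is reachable are mine, not the driver's):

/* Hypothetical consumer of the filter function above. */
static struct dma_chan *my_request_sprd_chan(u32 slave_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Any free channel matches; slave_id ends up in schan->dev_id */
	return dma_request_channel(mask, sprd_dma_filter_fn, &slave_id);
}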
diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c
index 07c20aa2e955..bc7a1de3f29b 100644
--- a/drivers/dma/st_fdma.c
+++ b/drivers/dma/st_fdma.c
@@ -243,8 +243,7 @@ static struct st_fdma_desc *st_fdma_alloc_desc(struct st_fdma_chan *fchan,
struct st_fdma_desc *fdesc;
int i;
- fdesc = kzalloc(sizeof(*fdesc) +
- sizeof(struct st_fdma_sw_node) * sg_len, GFP_NOWAIT);
+ fdesc = kzalloc(struct_size(fdesc, node, sg_len), GFP_NOWAIT);
if (!fdesc)
return NULL;
@@ -294,8 +293,6 @@ static void st_fdma_free_chan_res(struct dma_chan *chan)
struct rproc *rproc = fchan->fdev->slim_rproc->rproc;
unsigned long flags;
- LIST_HEAD(head);
-
dev_dbg(fchan->fdev->dev, "%s: freeing chan:%d\n",
__func__, fchan->vchan.chan.chan_id);
@@ -626,7 +623,6 @@ static void st_fdma_issue_pending(struct dma_chan *chan)
static int st_fdma_pause(struct dma_chan *chan)
{
unsigned long flags;
- LIST_HEAD(head);
struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
int ch_id = fchan->vchan.chan.chan_id;
unsigned long cmd = FDMA_CMD_PAUSE(ch_id);
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 4903a408fc14..ba239b529fa9 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -23,6 +23,7 @@
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -641,12 +642,13 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
{
struct stm32_dma_chan *chan = devid;
struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
- u32 status, scr;
+ u32 status, scr, sfcr;
spin_lock(&chan->vchan.lock);
status = stm32_dma_irq_status(chan);
scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+ sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
if (status & STM32_DMA_TCI) {
stm32_dma_irq_clear(chan, STM32_DMA_TCI);
@@ -661,10 +663,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
if (status & STM32_DMA_FEI) {
stm32_dma_irq_clear(chan, STM32_DMA_FEI);
status &= ~STM32_DMA_FEI;
- if (!(scr & STM32_DMA_SCR_EN))
- dev_err(chan2dev(chan), "FIFO Error\n");
- else
- dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
+ if (sfcr & STM32_DMA_SFCR_FEIE) {
+ if (!(scr & STM32_DMA_SCR_EN))
+ dev_err(chan2dev(chan), "FIFO Error\n");
+ else
+ dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
+ }
}
if (status) {
stm32_dma_irq_clear(chan, status);
@@ -1112,15 +1116,14 @@ static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
int ret;
chan->config_init = false;
- ret = clk_prepare_enable(dmadev->clk);
- if (ret < 0) {
- dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret);
+
+ ret = pm_runtime_get_sync(dmadev->ddev.dev);
+ if (ret < 0)
return ret;
- }
ret = stm32_dma_disable_chan(chan);
if (ret < 0)
- clk_disable_unprepare(dmadev->clk);
+ pm_runtime_put(dmadev->ddev.dev);
return ret;
}
@@ -1140,7 +1143,7 @@ static void stm32_dma_free_chan_resources(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
- clk_disable_unprepare(dmadev->clk);
+ pm_runtime_put(dmadev->ddev.dev);
vchan_free_chan_resources(to_virt_chan(c));
}
@@ -1240,6 +1243,12 @@ static int stm32_dma_probe(struct platform_device *pdev)
return PTR_ERR(dmadev->clk);
}
+ ret = clk_prepare_enable(dmadev->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
+ return ret;
+ }
+
dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
"st,mem2mem");
@@ -1289,7 +1298,7 @@ static int stm32_dma_probe(struct platform_device *pdev)
ret = dma_async_device_register(dd);
if (ret)
- return ret;
+ goto clk_free;
for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
chan = &dmadev->chan[i];
@@ -1321,20 +1330,58 @@ static int stm32_dma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dmadev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_put(&pdev->dev);
+
dev_info(&pdev->dev, "STM32 DMA driver registered\n");
return 0;
err_unregister:
dma_async_device_unregister(dd);
+clk_free:
+ clk_disable_unprepare(dmadev->clk);
return ret;
}
+#ifdef CONFIG_PM
+static int stm32_dma_runtime_suspend(struct device *dev)
+{
+ struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(dmadev->clk);
+
+ return 0;
+}
+
+static int stm32_dma_runtime_resume(struct device *dev)
+{
+ struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(dmadev->clk);
+ if (ret) {
+ dev_err(dev, "failed to prepare_enable clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_dma_pm_ops = {
+ SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend,
+ stm32_dma_runtime_resume, NULL)
+};
+
static struct platform_driver stm32_dma_driver = {
.driver = {
.name = "stm32-dma",
.of_match_table = stm32_dma_of_match,
+ .pm = &stm32_dma_pm_ops,
},
};
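The dmamux and MDMA hunks below repeat the same conversion: the clock is prepared once in probe, its on/off handling moves into runtime PM callbacks, and channel users take a runtime PM reference instead of touching the clock directly. Stripped to its skeleton, with placeholder names and struct layout rather than the stm32 driver's own:

/* Skeleton of the runtime-PM clock handling introduced here. */
struct my_dma_dev {
	struct dma_device ddev;
	struct clk *clk;
};

static int my_runtime_suspend(struct device *dev)
{
	struct my_dma_dev *d = dev_get_drvdata(dev);

	clk_disable_unprepare(d->clk);
	return 0;
}

static int my_runtime_resume(struct device *dev)
{
	struct my_dma_dev *d = dev_get_drvdata(dev);

	return clk_prepare_enable(d->clk);
}

static const struct dev_pm_ops my_pm_ops = {
	SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
};

static int my_alloc_chan_resources(struct dma_chan *c)
{
	/* resumes the controller (clock on) before the channel is used */
	int ret = pm_runtime_get_sync(c->device->dev);

	return ret < 0 ? ret : 0;
}

static void my_free_chan_resources(struct dma_chan *c)
{
	/* drops the reference; the controller may runtime-suspend again */
	pm_runtime_put(c->device->dev);
}

In probe the clock is enabled first, pm_runtime_set_active()/pm_runtime_enable() then declare that state to the PM core, and the get_noresume()/put() pair lets the device suspend once it goes idle.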
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index b922db90939a..a67119199c45 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -28,6 +28,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -79,8 +80,7 @@ static void stm32_dmamux_free(struct device *dev, void *route_data)
stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), 0);
clear_bit(mux->chan_id, dmamux->dma_inuse);
- if (!IS_ERR(dmamux->clk))
- clk_disable(dmamux->clk);
+ pm_runtime_put_sync(dev);
spin_unlock_irqrestore(&dmamux->lock, flags);
@@ -146,13 +146,10 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
/* Set dma request */
spin_lock_irqsave(&dmamux->lock, flags);
- if (!IS_ERR(dmamux->clk)) {
- ret = clk_enable(dmamux->clk);
- if (ret < 0) {
- spin_unlock_irqrestore(&dmamux->lock, flags);
- dev_err(&pdev->dev, "clk_prep_enable issue: %d\n", ret);
- goto error;
- }
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+ goto error;
}
spin_unlock_irqrestore(&dmamux->lock, flags);
@@ -254,6 +251,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "DMAMUX defaulting on %u requests\n",
stm32_dmamux->dmamux_requests);
}
+ pm_runtime_get_noresume(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
iomem = devm_ioremap_resource(&pdev->dev, res);
@@ -282,6 +280,8 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
stm32_dmamux->dmarouter.route_free = stm32_dmamux_free;
platform_set_drvdata(pdev, stm32_dmamux);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
if (!IS_ERR(stm32_dmamux->clk)) {
ret = clk_prepare_enable(stm32_dmamux->clk);
@@ -291,17 +291,52 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
}
}
+ pm_runtime_get_noresume(&pdev->dev);
+
/* Reset the dmamux */
for (i = 0; i < stm32_dmamux->dma_requests; i++)
stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i), 0);
- if (!IS_ERR(stm32_dmamux->clk))
- clk_disable(stm32_dmamux->clk);
+ pm_runtime_put(&pdev->dev);
return of_dma_router_register(node, stm32_dmamux_route_allocate,
&stm32_dmamux->dmarouter);
}
+#ifdef CONFIG_PM
+static int stm32_dmamux_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev =
+ container_of(dev, struct platform_device, dev);
+ struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(stm32_dmamux->clk);
+
+ return 0;
+}
+
+static int stm32_dmamux_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev =
+ container_of(dev, struct platform_device, dev);
+ struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = clk_prepare_enable(stm32_dmamux->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to prepare_enable clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_dmamux_pm_ops = {
+ SET_RUNTIME_PM_OPS(stm32_dmamux_runtime_suspend,
+ stm32_dmamux_runtime_resume, NULL)
+};
+
static const struct of_device_id stm32_dmamux_match[] = {
{ .compatible = "st,stm32h7-dmamux" },
{},
@@ -312,6 +347,7 @@ static struct platform_driver stm32_dmamux_driver = {
.driver = {
.name = "stm32-dmamux",
.of_match_table = stm32_dmamux_match,
+ .pm = &stm32_dmamux_pm_ops,
},
};
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 390e4cae0e1a..4e0eede599a8 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -37,6 +37,7 @@
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
@@ -1456,15 +1457,13 @@ static int stm32_mdma_alloc_chan_resources(struct dma_chan *c)
return -ENOMEM;
}
- ret = clk_prepare_enable(dmadev->clk);
- if (ret < 0) {
- dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret);
+ ret = pm_runtime_get_sync(dmadev->ddev.dev);
+ if (ret < 0)
return ret;
- }
ret = stm32_mdma_disable_chan(chan);
if (ret < 0)
- clk_disable_unprepare(dmadev->clk);
+ pm_runtime_put(dmadev->ddev.dev);
return ret;
}
@@ -1484,7 +1483,7 @@ static void stm32_mdma_free_chan_resources(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
- clk_disable_unprepare(dmadev->clk);
+ pm_runtime_put(dmadev->ddev.dev);
vchan_free_chan_resources(to_virt_chan(c));
dmam_pool_destroy(chan->desc_pool);
chan->desc_pool = NULL;
@@ -1579,9 +1578,11 @@ static int stm32_mdma_probe(struct platform_device *pdev)
dmadev->nr_channels = nr_channels;
dmadev->nr_requests = nr_requests;
- device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
+ ret = device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
dmadev->ahb_addr_masks,
count);
+ if (ret)
+ return ret;
dmadev->nr_ahb_addr_masks = count;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1597,6 +1598,12 @@ static int stm32_mdma_probe(struct platform_device *pdev)
return ret;
}
+ ret = clk_prepare_enable(dmadev->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
+ return ret;
+ }
+
dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
if (!IS_ERR(dmadev->rst)) {
reset_control_assert(dmadev->rst);
@@ -1668,6 +1675,10 @@ static int stm32_mdma_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, dmadev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_put(&pdev->dev);
dev_info(&pdev->dev, "STM32 MDMA driver registered\n");
@@ -1677,11 +1688,42 @@ err_unregister:
return ret;
}
+#ifdef CONFIG_PM
+static int stm32_mdma_runtime_suspend(struct device *dev)
+{
+ struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(dmadev->clk);
+
+ return 0;
+}
+
+static int stm32_mdma_runtime_resume(struct device *dev)
+{
+ struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(dmadev->clk);
+ if (ret) {
+ dev_err(dev, "failed to prepare_enable clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_mdma_pm_ops = {
+ SET_RUNTIME_PM_OPS(stm32_mdma_runtime_suspend,
+ stm32_mdma_runtime_resume, NULL)
+};
+
static struct platform_driver stm32_mdma_driver = {
.probe = stm32_mdma_probe,
.driver = {
.name = "stm32-mdma",
.of_match_table = stm32_mdma_of_match,
+ .pm = &stm32_mdma_pm_ops,
},
};
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 9a558e30c461..cf462b1abc0b 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -38,6 +38,9 @@
#include "dmaengine.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/tegra_apb_dma.h>
+
#define TEGRA_APBDMA_GENERAL 0x0
#define TEGRA_APBDMA_GENERAL_ENABLE BIT(31)
@@ -146,7 +149,7 @@ struct tegra_dma_channel_regs {
};
/*
- * tegra_dma_sg_req: Dma request details to configure hardware. This
+ * tegra_dma_sg_req: DMA request details to configure hardware. This
* contains the details for one transfer to configure DMA hw.
* The client's request for data transfer can be broken into multiple
* sub-transfer as per requester details and hw support.
@@ -155,7 +158,7 @@ struct tegra_dma_channel_regs {
*/
struct tegra_dma_sg_req {
struct tegra_dma_channel_regs ch_regs;
- int req_len;
+ unsigned int req_len;
bool configured;
bool last_sg;
struct list_head node;
@@ -169,8 +172,8 @@ struct tegra_dma_sg_req {
*/
struct tegra_dma_desc {
struct dma_async_tx_descriptor txd;
- int bytes_requested;
- int bytes_transferred;
+ unsigned int bytes_requested;
+ unsigned int bytes_transferred;
enum dma_status dma_status;
struct list_head node;
struct list_head tx_list;
@@ -186,7 +189,7 @@ typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
/* tegra_dma_channel: Channel specific information */
struct tegra_dma_channel {
struct dma_chan dma_chan;
- char name[30];
+ char name[12];
bool config_init;
int id;
int irq;
@@ -574,7 +577,7 @@ static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
struct tegra_dma_sg_req *hsgreq = NULL;
if (list_empty(&tdc->pending_sg_req)) {
- dev_err(tdc2dev(tdc), "Dma is running without req\n");
+ dev_err(tdc2dev(tdc), "DMA is running without req\n");
tegra_dma_stop(tdc);
return false;
}
@@ -587,7 +590,7 @@ static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
if (!hsgreq->configured) {
tegra_dma_stop(tdc);
- dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n");
+ dev_err(tdc2dev(tdc), "Error in DMA transfer, aborting DMA\n");
tegra_dma_abort_all(tdc);
return false;
}
@@ -636,7 +639,10 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
dma_desc = sgreq->dma_desc;
- dma_desc->bytes_transferred += sgreq->req_len;
+ /* if we dma for long enough the transfer count will wrap */
+ dma_desc->bytes_transferred =
+ (dma_desc->bytes_transferred + sgreq->req_len) %
+ dma_desc->bytes_requested;
 /* Callback needs to be called */
if (!dma_desc->cb_count)
@@ -669,6 +675,8 @@ static void tegra_dma_tasklet(unsigned long data)
dmaengine_desc_get_callback(&dma_desc->txd, &cb);
cb_count = dma_desc->cb_count;
dma_desc->cb_count = 0;
+ trace_tegra_dma_complete_cb(&tdc->dma_chan, cb_count,
+ cb.callback);
spin_unlock_irqrestore(&tdc->lock, flags);
while (cb_count--)
dmaengine_desc_callback_invoke(&cb, NULL);
@@ -685,6 +693,7 @@ static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
spin_lock_irqsave(&tdc->lock, flags);
+ trace_tegra_dma_isr(&tdc->dma_chan, irq);
status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
@@ -843,6 +852,7 @@ found:
dma_set_residue(txstate, residual);
}
+ trace_tegra_dma_tx_status(&tdc->dma_chan, cookie, txstate);
spin_unlock_irqrestore(&tdc->lock, flags);
return ret;
}
@@ -919,7 +929,7 @@ static int get_transfer_param(struct tegra_dma_channel *tdc,
return 0;
default:
- dev_err(tdc2dev(tdc), "Dma direction is not supported\n");
+ dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
return -EINVAL;
}
return -EINVAL;
@@ -952,7 +962,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
enum dma_slave_buswidth slave_bw;
if (!tdc->config_init) {
- dev_err(tdc2dev(tdc), "dma channel is not configured\n");
+ dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
return NULL;
}
if (sg_len < 1) {
@@ -985,7 +995,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
dma_desc = tegra_dma_desc_get(tdc);
if (!dma_desc) {
- dev_err(tdc2dev(tdc), "Dma descriptors not available\n");
+ dev_err(tdc2dev(tdc), "DMA descriptors not available\n");
return NULL;
}
INIT_LIST_HEAD(&dma_desc->tx_list);
@@ -1005,14 +1015,14 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
if ((len & 3) || (mem & 3) ||
(len > tdc->tdma->chip_data->max_dma_count)) {
dev_err(tdc2dev(tdc),
- "Dma length/memory address is not supported\n");
+ "DMA length/memory address is not supported\n");
tegra_dma_desc_put(tdc, dma_desc);
return NULL;
}
sg_req = tegra_dma_sg_req_get(tdc);
if (!sg_req) {
- dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
+ dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
tegra_dma_desc_put(tdc, dma_desc);
return NULL;
}
@@ -1087,7 +1097,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
* terminating the DMA.
*/
if (tdc->busy) {
- dev_err(tdc2dev(tdc), "Request not allowed when dma running\n");
+ dev_err(tdc2dev(tdc), "Request not allowed when DMA running\n");
return NULL;
}
@@ -1144,7 +1154,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
while (remain_len) {
sg_req = tegra_dma_sg_req_get(tdc);
if (!sg_req) {
- dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
+ dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
tegra_dma_desc_put(tdc, dma_desc);
return NULL;
}
@@ -1319,8 +1329,9 @@ static int tegra_dma_probe(struct platform_device *pdev)
return -ENODEV;
}
- tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
- sizeof(struct tegra_dma_channel), GFP_KERNEL);
+ tdma = devm_kzalloc(&pdev->dev,
+ struct_size(tdma, channels, cdata->nr_channels),
+ GFP_KERNEL);
if (!tdma)
return -ENOMEM;
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index b26256f23d67..5ec0dd97b397 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -678,8 +678,9 @@ static int tegra_adma_probe(struct platform_device *pdev)
return -ENODEV;
}
- tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
- sizeof(struct tegra_adma_chan), GFP_KERNEL);
+ tdma = devm_kzalloc(&pdev->dev,
+ struct_size(tdma, channels, cdata->nr_channels),
+ GFP_KERNEL);
if (!tdma)
return -ENOMEM;
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index fc0f9c8766a8..afbb1c95b721 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -643,8 +643,8 @@ static int td_probe(struct platform_device *pdev)
DRIVER_NAME))
return -EBUSY;
- td = kzalloc(sizeof(struct timb_dma) +
- sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL);
+ td = kzalloc(struct_size(td, channels, pdata->nr_channels),
+ GFP_KERNEL);
if (!td) {
err = -ENOMEM;
goto err_release_region;
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index cb20b411493e..c43c1a154604 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -86,6 +86,7 @@
#define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
+#define XILINX_DMA_DMASR_SG_MASK BIT(3)
#define XILINX_DMA_DMASR_IDLE BIT(1)
#define XILINX_DMA_DMASR_HALTED BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
@@ -161,7 +162,9 @@
#define XILINX_DMA_REG_BTT 0x28
/* AXI DMA Specific Masks/Bit fields */
-#define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0)
+#define XILINX_DMA_MAX_TRANS_LEN_MIN 8
+#define XILINX_DMA_MAX_TRANS_LEN_MAX 23
+#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT 16
@@ -412,7 +415,6 @@ struct xilinx_dma_config {
* @dev: Device Structure
* @common: DMA device structure
* @chan: Driver specific DMA channel
- * @has_sg: Specifies whether Scatter-Gather is present or not
* @mcdma: Specifies whether Multi-Channel is present or not
* @flush_on_fsync: Flush on frame sync
* @ext_addr: Indicates 64 bit addressing is supported by dma device
@@ -425,13 +427,13 @@ struct xilinx_dma_config {
* @rxs_clk: DMA s2mm stream clock
* @nr_channels: Number of channels DMA device supports
* @chan_id: DMA channel identifier
+ * @max_buffer_len: Max buffer length
*/
struct xilinx_dma_device {
void __iomem *regs;
struct device *dev;
struct dma_device common;
struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
- bool has_sg;
bool mcdma;
u32 flush_on_fsync;
bool ext_addr;
@@ -444,6 +446,7 @@ struct xilinx_dma_device {
struct clk *rxs_clk;
u32 nr_channels;
u32 chan_id;
+ u32 max_buffer_len;
};
/* Macros */
@@ -960,6 +963,34 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
}
/**
+ * xilinx_dma_calc_copysize - Calculate the amount of data to copy
+ * @chan: Driver specific DMA channel
+ * @size: Total data that needs to be copied
+ * @done: Amount of data that has been already copied
+ *
+ * Return: Amount of data that has to be copied
+ */
+static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
+ int size, int done)
+{
+ size_t copy;
+
+ copy = min_t(size_t, size - done,
+ chan->xdev->max_buffer_len);
+
+ if ((copy + done < size) &&
+ chan->xdev->common.copy_align) {
+ /*
+ * If this is not the last descriptor, make sure
+ * the next one will be properly aligned
+ */
+ copy = rounddown(copy,
+ (1 << chan->xdev->common.copy_align));
+ }
+ return copy;
+}
+
+/**
* xilinx_dma_tx_status - Get DMA transaction status
* @dchan: DMA channel
* @cookie: Transaction identifier
@@ -992,7 +1023,7 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
list_for_each_entry(segment, &desc->segments, node) {
hw = &segment->hw;
residue += (hw->control - hw->status) &
- XILINX_DMA_MAX_TRANS_LEN;
+ chan->xdev->max_buffer_len;
}
}
spin_unlock_irqrestore(&chan->lock, flags);
@@ -1070,7 +1101,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
struct xilinx_vdma_config *config = &chan->config;
struct xilinx_dma_tx_descriptor *desc, *tail_desc;
u32 reg, j;
- struct xilinx_vdma_tx_segment *tail_segment;
+ struct xilinx_vdma_tx_segment *segment, *last = NULL;
+ int i = 0;
/* This function was invoked with lock held */
if (chan->err)
@@ -1087,17 +1119,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
tail_desc = list_last_entry(&chan->pending_list,
struct xilinx_dma_tx_descriptor, node);
- tail_segment = list_last_entry(&tail_desc->segments,
- struct xilinx_vdma_tx_segment, node);
-
- /*
- * If hardware is idle, then all descriptors on the running lists are
- * done, start new transfers
- */
- if (chan->has_sg)
- dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
- desc->async_tx.phys);
-
/* Configure the hardware using info in the config structure */
if (chan->has_vflip) {
reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
@@ -1114,15 +1135,11 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
else
reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
- /*
- * With SG, start with circular mode, so that BDs can be fetched.
- * In direct register mode, if not parking, enable circular mode
- */
- if (chan->has_sg || !config->park)
- reg |= XILINX_DMA_DMACR_CIRC_EN;
-
+ /* If not parking, enable circular mode */
if (config->park)
reg &= ~XILINX_DMA_DMACR_CIRC_EN;
+ else
+ reg |= XILINX_DMA_DMACR_CIRC_EN;
dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
@@ -1144,48 +1161,38 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
return;
/* Start the transfer */
- if (chan->has_sg) {
- dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
- tail_segment->phys);
- list_splice_tail_init(&chan->pending_list, &chan->active_list);
- chan->desc_pendingcount = 0;
- } else {
- struct xilinx_vdma_tx_segment *segment, *last = NULL;
- int i = 0;
-
- if (chan->desc_submitcount < chan->num_frms)
- i = chan->desc_submitcount;
-
- list_for_each_entry(segment, &desc->segments, node) {
- if (chan->ext_addr)
- vdma_desc_write_64(chan,
- XILINX_VDMA_REG_START_ADDRESS_64(i++),
- segment->hw.buf_addr,
- segment->hw.buf_addr_msb);
- else
- vdma_desc_write(chan,
+ if (chan->desc_submitcount < chan->num_frms)
+ i = chan->desc_submitcount;
+
+ list_for_each_entry(segment, &desc->segments, node) {
+ if (chan->ext_addr)
+ vdma_desc_write_64(chan,
+ XILINX_VDMA_REG_START_ADDRESS_64(i++),
+ segment->hw.buf_addr,
+ segment->hw.buf_addr_msb);
+ else
+ vdma_desc_write(chan,
XILINX_VDMA_REG_START_ADDRESS(i++),
segment->hw.buf_addr);
- last = segment;
- }
+ last = segment;
+ }
- if (!last)
- return;
+ if (!last)
+ return;
- /* HW expects these parameters to be same for one transaction */
- vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
- vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
- last->hw.stride);
- vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
+ /* HW expects these parameters to be same for one transaction */
+ vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
+ vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
+ last->hw.stride);
+ vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
- chan->desc_submitcount++;
- chan->desc_pendingcount--;
- list_del(&desc->node);
- list_add_tail(&desc->node, &chan->active_list);
- if (chan->desc_submitcount == chan->num_frms)
- chan->desc_submitcount = 0;
- }
+ chan->desc_submitcount++;
+ chan->desc_pendingcount--;
+ list_del(&desc->node);
+ list_add_tail(&desc->node, &chan->active_list);
+ if (chan->desc_submitcount == chan->num_frms)
+ chan->desc_submitcount = 0;
chan->idle = false;
}
@@ -1254,7 +1261,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
/* Start the transfer */
dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
- hw->control & XILINX_DMA_MAX_TRANS_LEN);
+ hw->control & chan->xdev->max_buffer_len);
}
list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1357,7 +1364,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
/* Start the transfer */
dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
- hw->control & XILINX_DMA_MAX_TRANS_LEN);
+ hw->control & chan->xdev->max_buffer_len);
}
list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1718,7 +1725,7 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
struct xilinx_cdma_tx_segment *segment;
struct xilinx_cdma_desc_hw *hw;
- if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
+ if (!len || len > chan->xdev->max_buffer_len)
return NULL;
desc = xilinx_dma_alloc_tx_descriptor(chan);
@@ -1808,8 +1815,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
* Calculate the maximum number of bytes to transfer,
* making sure it is less than the hw limit
*/
- copy = min_t(size_t, sg_dma_len(sg) - sg_used,
- XILINX_DMA_MAX_TRANS_LEN);
+ copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
+ sg_used);
hw = &segment->hw;
/* Fill in the descriptor */
@@ -1913,8 +1920,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
* Calculate the maximum number of bytes to transfer,
* making sure it is less than the hw limit
*/
- copy = min_t(size_t, period_len - sg_used,
- XILINX_DMA_MAX_TRANS_LEN);
+ copy = xilinx_dma_calc_copysize(chan, period_len,
+ sg_used);
hw = &segment->hw;
xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
period_len * i);
@@ -2389,7 +2396,6 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
chan->dev = xdev->dev;
chan->xdev = xdev;
- chan->has_sg = xdev->has_sg;
chan->desc_pendingcount = 0x0;
chan->ext_addr = xdev->ext_addr;
/* This variable ensures that descriptors are not
@@ -2489,6 +2495,15 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
chan->stop_transfer = xilinx_dma_stop_transfer;
}
+ /* check if SG is enabled (only for AXIDMA and CDMA) */
+ if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
+ if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
+ XILINX_DMA_DMASR_SG_MASK)
+ chan->has_sg = true;
+ dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
+ chan->has_sg ? "enabled" : "disabled");
+ }
+
/* Initialize the tasklet */
tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
(unsigned long)chan);
@@ -2596,7 +2611,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
struct xilinx_dma_device *xdev;
struct device_node *child, *np = pdev->dev.of_node;
struct resource *io;
- u32 num_frames, addr_width;
+ u32 num_frames, addr_width, len_width;
int i, err;
/* Allocate and initialize the DMA engine structure */
@@ -2627,9 +2642,24 @@ static int xilinx_dma_probe(struct platform_device *pdev)
return PTR_ERR(xdev->regs);
/* Retrieve the DMA engine properties from the device tree */
- xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
- if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+ xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
+
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
+ if (!of_property_read_u32(node, "xlnx,sg-length-width",
+ &len_width)) {
+ if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
+ len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
+ dev_warn(xdev->dev,
+ "invalid xlnx,sg-length-width property value. Using default width\n");
+ } else {
+ if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
+ dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
+ xdev->max_buffer_len =
+ GENMASK(len_width - 1, 0);
+ }
+ }
+ }
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
err = of_property_read_u32(node, "xlnx,num-fstores",
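The maximum transfer length is no longer a fixed 23-bit mask: it is derived from the optional "xlnx,sg-length-width" property, accepted between 8 and 26 bits and falling back to 23 bits otherwise. A condensed sketch of that derivation (the helper name is made up; the real code also warns on out-of-range values and on widths above 23 bits):

/* Sketch of the max_buffer_len derivation added above. */
static u32 my_axidma_max_buffer_len(struct device_node *node)
{
	u32 len_width;

	if (of_property_read_u32(node, "xlnx,sg-length-width", &len_width) ||
	    len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
	    len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX)
		len_width = XILINX_DMA_MAX_TRANS_LEN_MAX;	/* default: 23 */

	return GENMASK(len_width - 1, 0);	/* e.g. 0x007FFFFF for 23 bits */
}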
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index ebb3fa2e1d00..362aa5450a5e 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2032,10 +2032,19 @@ setup_hw(struct hfc_pci *hc)
hc->hw.fifos = buffer;
pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle);
hc->hw.pci_io = ioremap((ulong) hc->hw.pci_io, 256);
+ if (unlikely(!hc->hw.pci_io)) {
+ printk(KERN_WARNING
+ "HFC-PCI: Error in ioremap for PCI!\n");
+ pci_free_consistent(hc->pdev, 0x8000, hc->hw.fifos,
+ hc->hw.dmahandle);
+ return 1;
+ }
+
printk(KERN_INFO
"HFC-PCI: defined at mem %#lx fifo %#lx(%#lx) IRQ %d HZ %d\n",
(u_long) hc->hw.pci_io, (u_long) hc->hw.fifos,
(u_long) hc->hw.dmahandle, hc->irq, HZ);
+
/* enable memory mapped ports, disable busmaster */
pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
hc->hw.int_m2 = 0;
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 3eeb12e93e98..d86e7a4ac04d 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -205,4 +205,15 @@ config MTK_CMDQ_MBOX
mailbox driver. The CMDQ is used to help read/write registers with
critical time limitation, such as updating display configuration
during the vblank.
+
+config ZYNQMP_IPI_MBOX
+ bool "Xilinx ZynqMP IPI Mailbox"
+ depends on ARCH_ZYNQMP && OF
+ help
+ Say yes here to add support for Xilinx IPI mailbox driver.
+	  This mailbox driver is used to send notifications or short messages
+	  between processors with Xilinx ZynqMP IPI. It will place the
+	  message into the IPI buffer and will access the IPI control
+	  registers to kick the other processor or enquire status.
+
endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index c818b5d011ae..8be3bcbcf882 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -44,3 +44,5 @@ obj-$(CONFIG_TEGRA_HSP_MBOX) += tegra-hsp.o
obj-$(CONFIG_STM32_IPCC) += stm32-ipcc.o
obj-$(CONFIG_MTK_CMDQ_MBOX) += mtk-cmdq-mailbox.o
+
+obj-$(CONFIG_ZYNQMP_IPI_MBOX) += zynqmp-ipi-mailbox.o
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 774362a05159..85fc5b56f99b 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -187,8 +187,8 @@ static int imx_mu_startup(struct mbox_chan *chan)
return 0;
}
- ret = request_irq(priv->irq, imx_mu_isr, IRQF_SHARED, cp->irq_desc,
- chan);
+ ret = request_irq(priv->irq, imx_mu_isr, IRQF_SHARED |
+ IRQF_NO_SUSPEND, cp->irq_desc, chan);
if (ret) {
dev_err(priv->dev,
"Unable to acquire IRQ %d\n", priv->irq);
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index 58bfafc34bc4..4e4ac4be6423 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -31,7 +31,6 @@
(MBOX_MAX_MSG_LEN / MBOX_BYTES_PER_LINE))
static bool mbox_data_ready;
-static struct dentry *root_debugfs_dir;
struct mbox_test_device {
struct device *dev;
@@ -45,6 +44,7 @@ struct mbox_test_device {
spinlock_t lock;
wait_queue_head_t waitq;
struct fasync_struct *async_queue;
+ struct dentry *root_debugfs_dir;
};
static ssize_t mbox_test_signal_write(struct file *filp,
@@ -262,16 +262,16 @@ static int mbox_test_add_debugfs(struct platform_device *pdev,
if (!debugfs_initialized())
return 0;
- root_debugfs_dir = debugfs_create_dir("mailbox", NULL);
- if (!root_debugfs_dir) {
+ tdev->root_debugfs_dir = debugfs_create_dir(dev_name(&pdev->dev), NULL);
+ if (!tdev->root_debugfs_dir) {
dev_err(&pdev->dev, "Failed to create Mailbox debugfs\n");
return -EINVAL;
}
- debugfs_create_file("message", 0600, root_debugfs_dir,
+ debugfs_create_file("message", 0600, tdev->root_debugfs_dir,
tdev, &mbox_test_message_ops);
- debugfs_create_file("signal", 0200, root_debugfs_dir,
+ debugfs_create_file("signal", 0200, tdev->root_debugfs_dir,
tdev, &mbox_test_signal_ops);
return 0;
@@ -363,22 +363,24 @@ static int mbox_test_probe(struct platform_device *pdev)
/* It's okay for MMIO to be NULL */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- size = resource_size(res);
tdev->tx_mmio = devm_ioremap_resource(&pdev->dev, res);
- if (PTR_ERR(tdev->tx_mmio) == -EBUSY)
+ if (PTR_ERR(tdev->tx_mmio) == -EBUSY) {
/* if reserved area in SRAM, try just ioremap */
+ size = resource_size(res);
tdev->tx_mmio = devm_ioremap(&pdev->dev, res->start, size);
- else if (IS_ERR(tdev->tx_mmio))
+ } else if (IS_ERR(tdev->tx_mmio)) {
tdev->tx_mmio = NULL;
+ }
/* If specified, second reg entry is Rx MMIO */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- size = resource_size(res);
tdev->rx_mmio = devm_ioremap_resource(&pdev->dev, res);
- if (PTR_ERR(tdev->rx_mmio) == -EBUSY)
+ if (PTR_ERR(tdev->rx_mmio) == -EBUSY) {
+ size = resource_size(res);
tdev->rx_mmio = devm_ioremap(&pdev->dev, res->start, size);
- else if (IS_ERR(tdev->rx_mmio))
+ } else if (IS_ERR(tdev->rx_mmio)) {
tdev->rx_mmio = tdev->tx_mmio;
+ }
tdev->tx_channel = mbox_test_request_channel(pdev, "tx");
tdev->rx_channel = mbox_test_request_channel(pdev, "rx");
@@ -416,7 +418,7 @@ static int mbox_test_remove(struct platform_device *pdev)
{
struct mbox_test_device *tdev = platform_get_drvdata(pdev);
- debugfs_remove_recursive(root_debugfs_dir);
+ debugfs_remove_recursive(tdev->root_debugfs_dir);
if (tdev->tx_channel)
mbox_free_channel(tdev->tx_channel);
diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c
index a338bd4cd7db..210fe504f5ae 100644
--- a/drivers/mailbox/stm32-ipcc.c
+++ b/drivers/mailbox/stm32-ipcc.c
@@ -270,14 +270,12 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
goto err_clk;
}
- device_init_wakeup(dev, true);
+ device_set_wakeup_capable(dev, true);
ret = dev_pm_set_dedicated_wake_irq(dev, ipcc->wkp);
if (ret) {
dev_err(dev, "Failed to set wake up irq\n");
goto err_init_wkp;
}
- } else {
- device_init_wakeup(dev, false);
}
/* mailbox controller */
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
index e443f6a2ec4b..11fc9fd6a94a 100644
--- a/drivers/mailbox/tegra-hsp.c
+++ b/drivers/mailbox/tegra-hsp.c
@@ -779,7 +779,7 @@ static int tegra_hsp_probe(struct platform_device *pdev)
return 0;
}
-static int tegra_hsp_resume(struct device *dev)
+static int __maybe_unused tegra_hsp_resume(struct device *dev)
{
struct tegra_hsp *hsp = dev_get_drvdata(dev);
unsigned int i;
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
new file mode 100644
index 000000000000..86887c9a349a
--- /dev/null
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -0,0 +1,725 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Inter Processor Interrupt(IPI) Mailbox Driver
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/zynqmp-ipi-message.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+/* IPI agent ID any */
+#define IPI_ID_ANY 0xFFUL
+
+/* indicate if ZynqMP IPI mailbox driver uses SMC calls or HVC calls */
+#define USE_SMC 0
+#define USE_HVC 1
+
+/* Default IPI SMC function IDs */
+#define SMC_IPI_MAILBOX_OPEN 0x82001000U
+#define SMC_IPI_MAILBOX_RELEASE 0x82001001U
+#define SMC_IPI_MAILBOX_STATUS_ENQUIRY 0x82001002U
+#define SMC_IPI_MAILBOX_NOTIFY 0x82001003U
+#define SMC_IPI_MAILBOX_ACK 0x82001004U
+#define SMC_IPI_MAILBOX_ENABLE_IRQ 0x82001005U
+#define SMC_IPI_MAILBOX_DISABLE_IRQ 0x82001006U
+
+/* IPI SMC Macros */
+#define IPI_SMC_ENQUIRY_DIRQ_MASK 0x00000001UL /* Flag to indicate if
+ * notification interrupt
+ * to be disabled.
+ */
+#define IPI_SMC_ACK_EIRQ_MASK 0x00000001UL /* Flag to indicate if
+ * notification interrupt
+ * to be enabled.
+ */
+
+/* IPI mailbox status */
+#define IPI_MB_STATUS_IDLE 0
+#define IPI_MB_STATUS_SEND_PENDING 1
+#define IPI_MB_STATUS_RECV_PENDING 2
+
+#define IPI_MB_CHNL_TX 0 /* IPI mailbox TX channel */
+#define IPI_MB_CHNL_RX 1 /* IPI mailbox RX channel */
+
+/**
+ * struct zynqmp_ipi_mchan - Description of a Xilinx ZynqMP IPI mailbox channel
+ * @is_opened: indicate if the IPI channel is opened
+ * @req_buf: local to remote request buffer start address
+ * @resp_buf: local to remote response buffer start address
+ * @req_buf_size: request buffer size
+ * @resp_buf_size: response buffer size
+ * @rx_buf: receive buffer to pass received message to client
+ * @chan_type: channel type
+ */
+struct zynqmp_ipi_mchan {
+ int is_opened;
+ void __iomem *req_buf;
+ void __iomem *resp_buf;
+ void *rx_buf;
+ size_t req_buf_size;
+ size_t resp_buf_size;
+ unsigned int chan_type;
+};
+
+/**
+ * struct zynqmp_ipi_mbox - Description of a ZynqMP IPI mailbox
+ * platform data.
+ * @pdata: pointer to the IPI private data
+ * @dev: device pointer corresponding to the Xilinx ZynqMP
+ * IPI mailbox
+ * @remote_id: remote IPI agent ID
+ * @mbox: mailbox Controller
+ * @mchans: array for channels, tx channel and rx channel.
+ * @irq: IPI agent interrupt ID
+ */
+struct zynqmp_ipi_mbox {
+ struct zynqmp_ipi_pdata *pdata;
+ struct device dev;
+ u32 remote_id;
+ struct mbox_controller mbox;
+ struct zynqmp_ipi_mchan mchans[2];
+};
+
+/**
+ * struct zynqmp_ipi_pdata - Description of a ZynqMP IPI agent platform data.
+ *
+ * @dev: device pointer corresponding to the Xilinx ZynqMP
+ * IPI agent
+ * @irq: IPI agent interrupt ID
+ * @method: IPI SMC or HVC is going to be used
+ * @local_id: local IPI agent ID
+ * @num_mboxes: number of mailboxes of this IPI agent
+ * @ipi_mboxes: IPI mailboxes of this IPI agent
+ */
+struct zynqmp_ipi_pdata {
+ struct device *dev;
+ int irq;
+ unsigned int method;
+ u32 local_id;
+ int num_mboxes;
+ struct zynqmp_ipi_mbox *ipi_mboxes;
+};
+
+static struct device_driver zynqmp_ipi_mbox_driver = {
+ .owner = THIS_MODULE,
+ .name = "zynqmp-ipi-mbox",
+};
+
+static void zynqmp_ipi_fw_call(struct zynqmp_ipi_mbox *ipi_mbox,
+ unsigned long a0, unsigned long a3,
+ struct arm_smccc_res *res)
+{
+ struct zynqmp_ipi_pdata *pdata = ipi_mbox->pdata;
+ unsigned long a1, a2;
+
+ a1 = pdata->local_id;
+ a2 = ipi_mbox->remote_id;
+ if (pdata->method == USE_SMC)
+ arm_smccc_smc(a0, a1, a2, a3, 0, 0, 0, 0, res);
+ else
+ arm_smccc_hvc(a0, a1, a2, a3, 0, 0, 0, 0, res);
+}
+
+/**
+ * zynqmp_ipi_interrupt - Interrupt handler for IPI notification
+ *
+ * @irq: Interrupt number
+ * @data: ZynqMP IPI mailbox platform data.
+ *
+ * Return: -EINVAL if there is no instance
+ * IRQ_NONE if the interrupt is not ours.
+ * IRQ_HANDLED if the rx interrupt was successfully handled.
+ */
+static irqreturn_t zynqmp_ipi_interrupt(int irq, void *data)
+{
+ struct zynqmp_ipi_pdata *pdata = data;
+ struct mbox_chan *chan;
+ struct zynqmp_ipi_mbox *ipi_mbox;
+ struct zynqmp_ipi_mchan *mchan;
+ struct zynqmp_ipi_message *msg;
+ u64 arg0, arg3;
+ struct arm_smccc_res res;
+ int ret, i;
+
+ (void)irq;
+ arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
+ arg3 = IPI_SMC_ENQUIRY_DIRQ_MASK;
+ for (i = 0; i < pdata->num_mboxes; i++) {
+ ipi_mbox = &pdata->ipi_mboxes[i];
+ mchan = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
+ chan = &ipi_mbox->mbox.chans[IPI_MB_CHNL_RX];
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, arg3, &res);
+ ret = (int)(res.a0 & 0xFFFFFFFF);
+ if (ret > 0 && ret & IPI_MB_STATUS_RECV_PENDING) {
+ if (mchan->is_opened) {
+ msg = mchan->rx_buf;
+ msg->len = mchan->req_buf_size;
+ memcpy_fromio(msg->data, mchan->req_buf,
+ msg->len);
+ mbox_chan_received_data(chan, (void *)msg);
+ return IRQ_HANDLED;
+ }
+ }
+ }
+ return IRQ_NONE;
+}
+
+/**
+ * zynqmp_ipi_peek_data - Peek to see if there are any rx messages.
+ *
+ * @chan: Channel Pointer
+ *
+ * Return: 'true' if there is pending rx data, 'false' if there is none.
+ */
+static bool zynqmp_ipi_peek_data(struct mbox_chan *chan)
+{
+ struct device *dev = chan->mbox->dev;
+ struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+ struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+ int ret;
+ u64 arg0;
+ struct arm_smccc_res res;
+
+ if (WARN_ON(!ipi_mbox)) {
+ dev_err(dev, "no platform drv data??\n");
+ return false;
+ }
+
+ arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ ret = (int)(res.a0 & 0xFFFFFFFF);
+
+ if (mchan->chan_type == IPI_MB_CHNL_TX) {
+ /* TX channel, check if the message has been acked
+ * by the remote, if yes, response is available.
+ */
+ if (ret < 0 || ret & IPI_MB_STATUS_SEND_PENDING)
+ return false;
+ else
+ return true;
+ } else if (ret > 0 && ret & IPI_MB_STATUS_RECV_PENDING) {
+		/* RX channel, check if a message has arrived. */
+ return true;
+ }
+ return false;
+}
+
+/**
+ * zynqmp_ipi_last_tx_done - See if the last tx message is sent
+ *
+ * @chan: Channel pointer
+ *
+ * Return: 'true' if no pending tx data, 'false' if there are any.
+ */
+static bool zynqmp_ipi_last_tx_done(struct mbox_chan *chan)
+{
+ struct device *dev = chan->mbox->dev;
+ struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+ struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+ int ret;
+ u64 arg0;
+ struct arm_smccc_res res;
+
+ if (WARN_ON(!ipi_mbox)) {
+ dev_err(dev, "no platform drv data??\n");
+ return false;
+ }
+
+ if (mchan->chan_type == IPI_MB_CHNL_TX) {
+		/* We only need to check if the message has been taken
+ * by the remote in the TX channel
+ */
+ arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ /* Check the SMC call status, a0 of the result */
+ ret = (int)(res.a0 & 0xFFFFFFFF);
+ if (ret < 0 || ret & IPI_MB_STATUS_SEND_PENDING)
+ return false;
+ return true;
+ }
+ /* Always true for the response message in RX channel */
+ return true;
+}
+
+/**
+ * zynqmp_ipi_send_data - Send data
+ *
+ * @chan: Channel Pointer
+ * @data: Message Pointer
+ *
+ * Return: 0 if all goes well, else an appropriate error code.
+ */
+static int zynqmp_ipi_send_data(struct mbox_chan *chan, void *data)
+{
+ struct device *dev = chan->mbox->dev;
+ struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+ struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+ struct zynqmp_ipi_message *msg = data;
+ u64 arg0;
+ struct arm_smccc_res res;
+
+ if (WARN_ON(!ipi_mbox)) {
+ dev_err(dev, "no platform drv data??\n");
+ return -EINVAL;
+ }
+
+ if (mchan->chan_type == IPI_MB_CHNL_TX) {
+ /* Send request message */
+ if (msg && msg->len > mchan->req_buf_size) {
+ dev_err(dev, "channel %d message length %u > max %lu\n",
+ mchan->chan_type, (unsigned int)msg->len,
+ mchan->req_buf_size);
+ return -EINVAL;
+ }
+ if (msg && msg->len)
+ memcpy_toio(mchan->req_buf, msg->data, msg->len);
+ /* Kick IPI mailbox to send message */
+ arg0 = SMC_IPI_MAILBOX_NOTIFY;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ } else {
+ /* Send response message */
+ if (msg && msg->len > mchan->resp_buf_size) {
+ dev_err(dev, "channel %d message length %u > max %lu\n",
+ mchan->chan_type, (unsigned int)msg->len,
+ mchan->resp_buf_size);
+ return -EINVAL;
+ }
+ if (msg && msg->len)
+ memcpy_toio(mchan->resp_buf, msg->data, msg->len);
+ arg0 = SMC_IPI_MAILBOX_ACK;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, IPI_SMC_ACK_EIRQ_MASK,
+ &res);
+ }
+ return 0;
+}
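On the consumer side, send_data() above is reached through the generic mailbox client API with a struct zynqmp_ipi_message as payload. A hedged sketch of such a client; the channel naming, blocking timeout and error handling are assumptions for illustration, not part of this driver:

/* Hypothetical mailbox client for the TX channel of this controller. */
#include <linux/mailbox_client.h>
#include <linux/mailbox/zynqmp-ipi-message.h>
#include <linux/slab.h>

static int my_ipi_send(struct device *dev, const u8 *buf, size_t len)
{
	struct mbox_client cl = {
		.dev = dev,
		.tx_block = true,
		.tx_tout = 100,			/* ms */
	};
	struct zynqmp_ipi_message *msg;
	struct mbox_chan *chan;
	int ret;

	msg = kzalloc(sizeof(*msg) + len, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;
	msg->len = len;
	memcpy(msg->data, buf, len);

	chan = mbox_request_channel_byname(&cl, "tx");
	if (IS_ERR(chan)) {
		kfree(msg);
		return PTR_ERR(chan);
	}

	ret = mbox_send_message(chan, msg);	/* ends up in send_data() */
	mbox_free_channel(chan);
	kfree(msg);
	return ret < 0 ? ret : 0;
}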
+
+/**
+ * zynqmp_ipi_startup - Startup the IPI channel
+ *
+ * @chan: Channel pointer
+ *
+ * Return: 0 if all goes well, else the corresponding error code
+ */
+static int zynqmp_ipi_startup(struct mbox_chan *chan)
+{
+ struct device *dev = chan->mbox->dev;
+ struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+ struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+ u64 arg0;
+ struct arm_smccc_res res;
+ int ret = 0;
+ unsigned int nchan_type;
+
+ if (mchan->is_opened)
+ return 0;
+
+ /* If no channel has been opened, open the IPI mailbox */
+ nchan_type = (mchan->chan_type + 1) % 2;
+ if (!ipi_mbox->mchans[nchan_type].is_opened) {
+ arg0 = SMC_IPI_MAILBOX_OPEN;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ /* Check the SMC call status, a0 of the result */
+ ret = (int)(res.a0 & 0xFFFFFFFF);
+ if (ret < 0) {
+ dev_err(dev, "SMC to open the IPI channel failed.\n");
+ return ret;
+ }
+ ret = 0;
+ }
+
+ /* If it is RX channel, enable the IPI notification interrupt */
+ if (mchan->chan_type == IPI_MB_CHNL_RX) {
+ arg0 = SMC_IPI_MAILBOX_ENABLE_IRQ;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ }
+ mchan->is_opened = 1;
+
+ return ret;
+}
+
+/**
+ * zynqmp_ipi_shutdown - Shutdown the IPI channel
+ *
+ * @chan: Channel pointer
+ */
+static void zynqmp_ipi_shutdown(struct mbox_chan *chan)
+{
+ struct device *dev = chan->mbox->dev;
+ struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+ struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+ u64 arg0;
+ struct arm_smccc_res res;
+ unsigned int chan_type;
+
+ if (!mchan->is_opened)
+ return;
+
+ /* If it is RX channel, disable notification interrupt */
+ chan_type = mchan->chan_type;
+ if (chan_type == IPI_MB_CHNL_RX) {
+ arg0 = SMC_IPI_MAILBOX_DISABLE_IRQ;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ }
+ /* Release IPI mailbox if no other channel is opened */
+ chan_type = (chan_type + 1) % 2;
+ if (!ipi_mbox->mchans[chan_type].is_opened) {
+ arg0 = SMC_IPI_MAILBOX_RELEASE;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ }
+
+ mchan->is_opened = 0;
+}
+
+/* ZynqMP IPI mailbox operations */
+static const struct mbox_chan_ops zynqmp_ipi_chan_ops = {
+ .startup = zynqmp_ipi_startup,
+ .shutdown = zynqmp_ipi_shutdown,
+ .peek_data = zynqmp_ipi_peek_data,
+ .last_tx_done = zynqmp_ipi_last_tx_done,
+ .send_data = zynqmp_ipi_send_data,
+};
+
+/**
+ * zynqmp_ipi_of_xlate - Translate an OF phandle to an IPI mailbox channel
+ *
+ * @mbox: mailbox controller pointer
+ * @p: phandle pointer
+ *
+ * Return: Mailbox channel on success, otherwise an error pointer.
+ */
+static struct mbox_chan *zynqmp_ipi_of_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *p)
+{
+ struct mbox_chan *chan;
+ struct device *dev = mbox->dev;
+ unsigned int chan_type;
+
+ /* Only supports TX and RX channels */
+ chan_type = p->args[0];
+ if (chan_type != IPI_MB_CHNL_TX && chan_type != IPI_MB_CHNL_RX) {
+ dev_err(dev, "req chnl failure: invalid chnl type %u.\n",
+ chan_type);
+ return ERR_PTR(-EINVAL);
+ }
+ chan = &mbox->chans[chan_type];
+ return chan;
+}
+
+static const struct of_device_id zynqmp_ipi_of_match[] = {
+ { .compatible = "xlnx,zynqmp-ipi-mailbox" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, zynqmp_ipi_of_match);
+
+/**
+ * zynqmp_ipi_mbox_get_buf_res - Get buffer resource from the IPI dev node
+ *
+ * @node: IPI mbox device child node
+ * @name: name of the IPI buffer
+ * @res: pointer to where the resource information will be stored.
+ *
+ * Return: 0 for success, negative value for failure
+ */
+static int zynqmp_ipi_mbox_get_buf_res(struct device_node *node,
+ const char *name,
+ struct resource *res)
+{
+ int ret, index;
+
+ index = of_property_match_string(node, "reg-names", name);
+ if (index >= 0) {
+ ret = of_address_to_resource(node, index, res);
+ if (ret < 0)
+ return -EINVAL;
+ return 0;
+ }
+ return -ENODEV;
+}
+
+/**
+ * zynqmp_ipi_mbox_dev_release() - release callback for an IPI mailbox device
+ *
+ * @dev: the ipi mailbox device
+ *
+ * This empty callback avoids the kernel warning about a missing device release() function.
+ *
+ */
+static void zynqmp_ipi_mbox_dev_release(struct device *dev)
+{
+ (void)dev;
+}
+
+/**
+ * zynqmp_ipi_mbox_probe - probe IPI mailbox resource from device node
+ *
+ * @ipi_mbox: pointer to IPI mailbox private data structure
+ * @node: IPI mailbox device node
+ *
+ * Return: 0 for success, negative value for failure
+ */
+static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
+ struct device_node *node)
+{
+ struct zynqmp_ipi_mchan *mchan;
+ struct mbox_chan *chans;
+ struct mbox_controller *mbox;
+ struct resource res;
+ struct device *dev, *mdev;
+ const char *name;
+ int ret;
+
+ dev = ipi_mbox->pdata->dev;
+ /* Initialize dev for IPI mailbox */
+ ipi_mbox->dev.parent = dev;
+ ipi_mbox->dev.release = NULL;
+ ipi_mbox->dev.of_node = node;
+ dev_set_name(&ipi_mbox->dev, "%s", of_node_full_name(node));
+ dev_set_drvdata(&ipi_mbox->dev, ipi_mbox);
+ ipi_mbox->dev.release = zynqmp_ipi_mbox_dev_release;
+ ipi_mbox->dev.driver = &zynqmp_ipi_mbox_driver;
+ ret = device_register(&ipi_mbox->dev);
+ if (ret) {
+ dev_err(dev, "Failed to register ipi mbox dev.\n");
+ return ret;
+ }
+ mdev = &ipi_mbox->dev;
+
+ mchan = &ipi_mbox->mchans[IPI_MB_CHNL_TX];
+ name = "local_request_region";
+ ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
+ if (!ret) {
+ mchan->req_buf_size = resource_size(&res);
+ mchan->req_buf = devm_ioremap(mdev, res.start,
+ mchan->req_buf_size);
+ if (!mchan->req_buf) {
+ dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+ return -ENOMEM;
+ }
+ } else if (ret != -ENODEV) {
+ dev_err(mdev, "Unmatched resource %s, %d.\n", name, ret);
+ return ret;
+ }
+
+ name = "remote_response_region";
+ ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
+ if (!ret) {
+ mchan->resp_buf_size = resource_size(&res);
+ mchan->resp_buf = devm_ioremap(mdev, res.start,
+ mchan->resp_buf_size);
+ if (!mchan->resp_buf) {
+ dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+ return -ENOMEM;
+ }
+ } else if (ret != -ENODEV) {
+ dev_err(mdev, "Unmatched resource %s.\n", name);
+ return ret;
+ }
+ mchan->rx_buf = devm_kzalloc(mdev,
+ mchan->resp_buf_size +
+ sizeof(struct zynqmp_ipi_message),
+ GFP_KERNEL);
+ if (!mchan->rx_buf)
+ return -ENOMEM;
+
+ mchan = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
+ name = "remote_request_region";
+ ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
+ if (!ret) {
+ mchan->req_buf_size = resource_size(&res);
+ mchan->req_buf = devm_ioremap(mdev, res.start,
+ mchan->req_buf_size);
+ if (!mchan->req_buf) {
+ dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+ return -ENOMEM;
+ }
+ } else if (ret != -ENODEV) {
+ dev_err(mdev, "Unmatched resource %s.\n", name);
+ return ret;
+ }
+
+ name = "local_response_region";
+ ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
+ if (!ret) {
+ mchan->resp_buf_size = resource_size(&res);
+ mchan->resp_buf = devm_ioremap(mdev, res.start,
+ mchan->resp_buf_size);
+ if (!mchan->resp_buf) {
+ dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+ return -ENOMEM;
+ }
+ } else if (ret != -ENODEV) {
+ dev_err(mdev, "Unmatched resource %s.\n", name);
+ return ret;
+ }
+ mchan->rx_buf = devm_kzalloc(mdev,
+ mchan->resp_buf_size +
+ sizeof(struct zynqmp_ipi_message),
+ GFP_KERNEL);
+ if (!mchan->rx_buf)
+ return -ENOMEM;
+
+ /* Get the IPI remote agent ID */
+ ret = of_property_read_u32(node, "xlnx,ipi-id", &ipi_mbox->remote_id);
+ if (ret < 0) {
+ dev_err(dev, "No IPI remote ID is specified.\n");
+ return ret;
+ }
+
+ mbox = &ipi_mbox->mbox;
+ mbox->dev = mdev;
+ mbox->ops = &zynqmp_ipi_chan_ops;
+ mbox->num_chans = 2;
+ mbox->txdone_irq = false;
+ mbox->txdone_poll = true;
+ mbox->txpoll_period = 5;
+ mbox->of_xlate = zynqmp_ipi_of_xlate;
+ chans = devm_kzalloc(mdev, 2 * sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+ mbox->chans = chans;
+ chans[IPI_MB_CHNL_TX].con_priv = &ipi_mbox->mchans[IPI_MB_CHNL_TX];
+ chans[IPI_MB_CHNL_RX].con_priv = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
+ ipi_mbox->mchans[IPI_MB_CHNL_TX].chan_type = IPI_MB_CHNL_TX;
+ ipi_mbox->mchans[IPI_MB_CHNL_RX].chan_type = IPI_MB_CHNL_RX;
+ ret = devm_mbox_controller_register(mdev, mbox);
+ if (ret)
+ dev_err(mdev,
+ "Failed to register mbox_controller(%d)\n", ret);
+ else
+ dev_info(mdev,
+ "Registered ZynqMP IPI mbox with TX/RX channels.\n");
+ return ret;
+}
+
+/**
+ * zynqmp_ipi_free_mboxes - Free IPI mailboxes devices
+ *
+ * @pdata: IPI private data
+ */
+static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata)
+{
+ struct zynqmp_ipi_mbox *ipi_mbox;
+ int i;
+
+ i = pdata->num_mboxes - 1;
+ for (; i >= 0; i--) {
+ ipi_mbox = &pdata->ipi_mboxes[i];
+ if (ipi_mbox->dev.parent) {
+ mbox_controller_unregister(&ipi_mbox->mbox);
+ device_unregister(&ipi_mbox->dev);
+ }
+ }
+}
+
+static int zynqmp_ipi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *nc, *np = pdev->dev.of_node;
+ struct zynqmp_ipi_pdata *pdata;
+ struct zynqmp_ipi_mbox *mbox;
+ int num_mboxes, ret = -EINVAL;
+
+ num_mboxes = of_get_child_count(np);
+ pdata = devm_kzalloc(dev, sizeof(*pdata) + (num_mboxes * sizeof(*mbox)),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+ pdata->dev = dev;
+
+ /* Get the IPI local agent ID */
+ ret = of_property_read_u32(np, "xlnx,ipi-id", &pdata->local_id);
+ if (ret < 0) {
+ dev_err(dev, "No IPI local ID is specified.\n");
+ return ret;
+ }
+
+ pdata->num_mboxes = num_mboxes;
+ pdata->ipi_mboxes = (struct zynqmp_ipi_mbox *)
+ ((char *)pdata + sizeof(*pdata));
+
+ mbox = pdata->ipi_mboxes;
+ for_each_available_child_of_node(np, nc) {
+ mbox->pdata = pdata;
+ ret = zynqmp_ipi_mbox_probe(mbox, nc);
+ if (ret) {
+ dev_err(dev, "failed to probe subdev.\n");
+ ret = -EINVAL;
+ goto free_mbox_dev;
+ }
+ mbox++;
+ }
+
+ /* IPI IRQ */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "unable to find IPI IRQ.\n");
+ goto free_mbox_dev;
+ }
+ pdata->irq = ret;
+ ret = devm_request_irq(dev, pdata->irq, zynqmp_ipi_interrupt,
+ IRQF_SHARED, dev_name(dev), pdata);
+ if (ret) {
+ dev_err(dev, "IRQ %d is not requested successfully.\n",
+ pdata->irq);
+ goto free_mbox_dev;
+ }
+
+ platform_set_drvdata(pdev, pdata);
+ return ret;
+
+free_mbox_dev:
+ zynqmp_ipi_free_mboxes(pdata);
+ return ret;
+}
+
+static int zynqmp_ipi_remove(struct platform_device *pdev)
+{
+ struct zynqmp_ipi_pdata *pdata;
+
+ pdata = platform_get_drvdata(pdev);
+ zynqmp_ipi_free_mboxes(pdata);
+
+ return 0;
+}
+
+static struct platform_driver zynqmp_ipi_driver = {
+ .probe = zynqmp_ipi_probe,
+ .remove = zynqmp_ipi_remove,
+ .driver = {
+ .name = "zynqmp-ipi",
+ .of_match_table = of_match_ptr(zynqmp_ipi_of_match),
+ },
+};
+
+static int __init zynqmp_ipi_init(void)
+{
+ return platform_driver_register(&zynqmp_ipi_driver);
+}
+subsys_initcall(zynqmp_ipi_init);
+
+static void __exit zynqmp_ipi_exit(void)
+{
+ platform_driver_unregister(&zynqmp_ipi_driver);
+}
+module_exit(zynqmp_ipi_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Xilinx ZynqMP IPI Mailbox driver");
+MODULE_AUTHOR("Xilinx Inc.");
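For context, a kernel consumer would reach this controller through the generic mailbox client API rather than by calling the driver directly. The sketch below is illustrative only and not part of this patch: the client device, channel index and payload handling are hypothetical, and the zynqmp_ipi_message layout (a length field followed by data bytes) is inferred from the send path above.

#include <linux/err.h>
#include <linux/mailbox_client.h>

static int example_ipi_send(struct device *dev, struct zynqmp_ipi_message *msg)
{
	struct mbox_client cl = {
		.dev = dev,
		.tx_block = true,	/* wait for last_tx_done() polling */
		.tx_tout = 100,		/* ms */
	};
	struct mbox_chan *chan;
	int ret;

	/* Index 0 maps to IPI_MB_CHNL_TX in zynqmp_ipi_of_xlate() */
	chan = mbox_request_channel(&cl, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Routed to zynqmp_ipi_send_data(): msg->len bytes are copied
	 * into the local request region and the remote is notified.
	 */
	ret = mbox_send_message(chan, msg);
	mbox_free_channel(chan);
	return ret < 0 ? ret : 0;
}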
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 22547d7a84ea..947a8adbc799 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -974,6 +974,36 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
break;
}
+ /* Check a specific PEB for bitflips and scrub it if needed */
+ case UBI_IOCRPEB:
+ {
+ int pnum;
+
+ err = get_user(pnum, (__user int32_t *)argp);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ err = ubi_bitflip_check(ubi, pnum, 0);
+ break;
+ }
+
+ /* Force scrubbing for a specific PEB */
+ case UBI_IOCSPEB:
+ {
+ int pnum;
+
+ err = get_user(pnum, (__user int32_t *)argp);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ err = ubi_bitflip_check(ubi, pnum, 1);
+ break;
+ }
+
default:
err = -ENOTTY;
break;
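From user space the two new ioctls take a PEB number by pointer on the UBI device node. A minimal sketch follows; it is not part of this patch, the device path and PEB number are example values, the build needs a ubi-user.h that already defines UBI_IOCRPEB/UBI_IOCSPEB, and CAP_SYS_RESOURCE is required as for the other ioctls handled by ubi_cdev_ioctl(). When bitflips are found and the PEB is queued for scrubbing, the call fails with errno set to EUCLEAN.

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/ubi-user.h>

int main(void)
{
	int32_t pnum = 42;			/* example PEB number */
	int fd = open("/dev/ubi0", O_RDONLY);	/* example UBI device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Read-check the PEB; use UBI_IOCSPEB to force scrubbing instead */
	if (ioctl(fd, UBI_IOCRPEB, &pnum) < 0)
		perror("UBI_IOCRPEB");
	close(fd);
	return 0;
}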
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index d47b9e436e67..a1b9e764d489 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -929,6 +929,7 @@ int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e,
int ubi_is_erase_work(struct ubi_work *wrk);
void ubi_refill_pools(struct ubi_device *ubi);
int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
+int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force_scrub);
/* io.c */
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 6f2ac865ff05..2709dc02fc24 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -278,6 +278,27 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
}
/**
+ * in_pq - check if a wear-leveling entry is present in the protection queue.
+ * @ubi: UBI device description object
+ * @e: the wear-leveling entry to check
+ *
+ * This function returns non-zero if @e is in the protection queue and zero
+ * if it is not.
+ */
+static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+ struct ubi_wl_entry *p;
+ int i;
+
+ for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
+ list_for_each_entry(p, &ubi->pq[i], u.list)
+ if (p == e)
+ return 1;
+
+ return 0;
+}
+
+/**
* prot_queue_add - add physical eraseblock to the protection queue.
* @ubi: UBI device description object
* @e: the physical eraseblock to add
@@ -1419,6 +1440,150 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
return err;
}
+static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+ if (in_wl_tree(e, &ubi->scrub))
+ return false;
+ else if (in_wl_tree(e, &ubi->erroneous))
+ return false;
+ else if (ubi->move_from == e)
+ return false;
+ else if (ubi->move_to == e)
+ return false;
+
+ return true;
+}
+
+/**
+ * ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock to schedule
+ * @force: don't read the block, assume bitflips happened and take action.
+ *
+ * This function reads the given eraseblock and checks if bitflips occurred.
+ * In case of bitflips, the eraseblock is scheduled for scrubbing.
+ * If scrubbing is forced with @force, the eraseblock is not read,
+ * but scheduled for scrubbing right away.
+ *
+ * Returns:
+ * %EINVAL, PEB is out of range
+ * %ENOENT, PEB is no longer used by UBI
+ * %EBUSY, PEB cannot be checked now or a check is currently running on it
+ * %EAGAIN, bit flips happened but scrubbing is currently not possible
+ * %EUCLEAN, bit flips happened and PEB is scheduled for scrubbing
+ * %0, no bit flips detected
+ */
+int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force)
+{
+ int err;
+ struct ubi_wl_entry *e;
+
+ if (pnum < 0 || pnum >= ubi->peb_count) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Pause all parallel work, otherwise it can happen that the
+ * erase worker frees a wl entry under us.
+ */
+ down_write(&ubi->work_sem);
+
+ /*
+ * Make sure that the wl entry does not change state while
+ * inspecting it.
+ */
+ spin_lock(&ubi->wl_lock);
+ e = ubi->lookuptbl[pnum];
+ if (!e) {
+ spin_unlock(&ubi->wl_lock);
+ err = -ENOENT;
+ goto out_resume;
+ }
+
+ /*
+ * Does it make sense to check this PEB?
+ */
+ if (!scrub_possible(ubi, e)) {
+ spin_unlock(&ubi->wl_lock);
+ err = -EBUSY;
+ goto out_resume;
+ }
+ spin_unlock(&ubi->wl_lock);
+
+ if (!force) {
+ mutex_lock(&ubi->buf_mutex);
+ err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
+ mutex_unlock(&ubi->buf_mutex);
+ }
+
+ if (force || err == UBI_IO_BITFLIPS) {
+ /*
+ * Okay, bit flip happened, let's figure out what we can do.
+ */
+ spin_lock(&ubi->wl_lock);
+
+ /*
+ * Recheck. We released wl_lock, UBI might have killed the
+ * wl entry under us.
+ */
+ e = ubi->lookuptbl[pnum];
+ if (!e) {
+ spin_unlock(&ubi->wl_lock);
+ err = -ENOENT;
+ goto out_resume;
+ }
+
+ /*
+ * Need to re-check state
+ */
+ if (!scrub_possible(ubi, e)) {
+ spin_unlock(&ubi->wl_lock);
+ err = -EBUSY;
+ goto out_resume;
+ }
+
+ if (in_pq(ubi, e)) {
+ prot_queue_del(ubi, e->pnum);
+ wl_tree_add(e, &ubi->scrub);
+ spin_unlock(&ubi->wl_lock);
+
+ err = ensure_wear_leveling(ubi, 1);
+ } else if (in_wl_tree(e, &ubi->used)) {
+ rb_erase(&e->u.rb, &ubi->used);
+ wl_tree_add(e, &ubi->scrub);
+ spin_unlock(&ubi->wl_lock);
+
+ err = ensure_wear_leveling(ubi, 1);
+ } else if (in_wl_tree(e, &ubi->free)) {
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+ spin_unlock(&ubi->wl_lock);
+
+ /*
+ * This PEB is empty, so we can schedule it for
+ * erasure right away. No wear leveling needed.
+ */
+ err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
+ force ? 0 : 1, true);
+ } else {
+ spin_unlock(&ubi->wl_lock);
+ err = -EAGAIN;
+ }
+
+ if (!err && !force)
+ err = -EUCLEAN;
+ } else {
+ err = 0;
+ }
+
+out_resume:
+ up_write(&ubi->work_sem);
+out:
+ return err;
+}
+
/**
* tree_destroy - destroy an RB-tree.
* @ubi: UBI device description object
@@ -1848,16 +2013,11 @@ static int self_check_in_wl_tree(const struct ubi_device *ubi,
static int self_check_in_pq(const struct ubi_device *ubi,
struct ubi_wl_entry *e)
{
- struct ubi_wl_entry *p;
- int i;
-
if (!ubi_dbg_chk_gen(ubi))
return 0;
- for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
- list_for_each_entry(p, &ubi->pq[i], u.list)
- if (p == e)
- return 0;
+ if (in_pq(ubi, e))
+ return 0;
ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
e->pnum, e->ec);
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 61e43802b9a5..645efac6310d 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -289,6 +289,11 @@ static struct hw_info *get_hwinfo(struct pcmcia_device *link)
virt = ioremap(link->resource[2]->start,
resource_size(link->resource[2]));
+ if (unlikely(!virt)) {
+ pcmcia_release_window(link, link->resource[2]);
+ return NULL;
+ }
+
for (i = 0; i < NR_INFO; i++) {
pcmcia_map_mem_page(link, link->resource[2],
hw_info[i].offset & ~(resource_size(link->resource[2])-1));
@@ -1423,6 +1428,11 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
/* Try scribbling on the buffer */
info->base = ioremap(link->resource[3]->start,
resource_size(link->resource[3]));
+ if (unlikely(!info->base)) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
for (i = 0; i < (TX_PAGES<<8); i += 2)
__raw_writew((i>>1), info->base+offset+i);
udelay(100);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index e21bf3724611..1c50c10b5a16 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -1211,6 +1211,11 @@ int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
sc = (struct octeon_soft_command *)
octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0);
+ if (!sc) {
+ netif_info(lio, rx_err, lio->netdev,
+ "Failed to allocate soft command\n");
+ return -ENOMEM;
+ }
ncmd = (union octnet_cmd *)sc->virtdptr;
@@ -1684,6 +1689,11 @@ int liquidio_set_fec(struct lio *lio, int on_off)
sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
sizeof(struct oct_nic_seapi_resp), 0);
+ if (!sc) {
+ dev_err(&oct->pci_dev->dev,
+ "Failed to allocate soft command\n");
+ return -ENOMEM;
+ }
ncmd = sc->virtdptr;
resp = sc->virtrptr;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 9b7819fdc9de..fb6f813cff65 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1192,6 +1192,11 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
sc = (struct octeon_soft_command *)
octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
16, 0);
+ if (!sc) {
+ netif_info(lio, rx_err, lio->netdev,
+ "Failed to allocate octeon_soft_command\n");
+ return;
+ }
ncmd = (union octnet_cmd *)sc->virtdptr;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 503cfadff4ac..aa2be4807191 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -2234,6 +2234,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d",
WQ_MEM_RECLAIM,
nic->vf_id);
+ if (!nic->nicvf_rx_mode_wq) {
+ err = -ENOMEM;
+ dev_err(dev, "Failed to allocate work queue\n");
+ goto err_unregister_interrupts;
+ }
+
INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
spin_lock_init(&nic->rx_mode_wq_lock);
mutex_init(&nic->rx_mode_mtx);
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index a69cd19a55ae..1eca0fdb9933 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -547,6 +547,11 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id)
return -1;
base = ioremap(link->resource[2]->start, resource_size(link->resource[2]));
+ if (!base) {
+ pcmcia_release_window(link, link->resource[2]);
+ return -ENOMEM;
+ }
+
pcmcia_map_mem_page(link, link->resource[2], 0);
/*
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index c19e74e6ac94..a5d5d6fc1da0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2645,6 +2645,8 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
if (!priv->cmd.context)
return -ENOMEM;
+ if (mlx4_is_mfunc(dev))
+ mutex_lock(&priv->cmd.slave_cmd_mutex);
down_write(&priv->cmd.switch_sem);
for (i = 0; i < priv->cmd.max_cmds; ++i) {
priv->cmd.context[i].token = i;
@@ -2670,6 +2672,8 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
down(&priv->cmd.poll_sem);
priv->cmd.use_events = 1;
up_write(&priv->cmd.switch_sem);
+ if (mlx4_is_mfunc(dev))
+ mutex_unlock(&priv->cmd.slave_cmd_mutex);
return err;
}
@@ -2682,6 +2686,8 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
+ if (mlx4_is_mfunc(dev))
+ mutex_lock(&priv->cmd.slave_cmd_mutex);
down_write(&priv->cmd.switch_sem);
priv->cmd.use_events = 0;
@@ -2689,9 +2695,12 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
down(&priv->cmd.event_sem);
kfree(priv->cmd.context);
+ priv->cmd.context = NULL;
up(&priv->cmd.poll_sem);
up_write(&priv->cmd.switch_sem);
+ if (mlx4_is_mfunc(dev))
+ mutex_unlock(&priv->cmd.slave_cmd_mutex);
}
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index eb13d3618162..4356f3a58002 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2719,13 +2719,13 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
int total_pages;
int total_mem;
int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
+ int tot;
sq_size = 1 << (log_sq_size + log_sq_sride + 4);
rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
total_mem = sq_size + rq_size;
- total_pages =
- roundup_pow_of_two((total_mem + (page_offset << 6)) >>
- page_shift);
+ tot = (total_mem + (page_offset << 6)) >> page_shift;
+ total_pages = !tot ? 1 : roundup_pow_of_two(tot);
return total_pages;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 0804b478ad19..a0987cc5fe4a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -424,6 +424,9 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
+ if (!netif_is_rxfh_configured(priv->netdev))
+ mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, count);
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index a1a3e2774989..a66b6ed80b30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1129,16 +1129,17 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
- int ret, pf_num;
+ unsigned int fn;
+ int ret;
- ret = mlx5_lag_get_pf_num(priv->mdev, &pf_num);
- if (ret)
- return ret;
+ fn = PCI_FUNC(priv->mdev->pdev->devfn);
+ if (fn >= MLX5_MAX_PORTS)
+ return -EOPNOTSUPP;
if (rep->vport == MLX5_VPORT_UPLINK)
- ret = snprintf(buf, len, "p%d", pf_num);
+ ret = snprintf(buf, len, "p%d", fn);
else
- ret = snprintf(buf, len, "pf%dvf%d", pf_num, rep->vport - 1);
+ ret = snprintf(buf, len, "pf%dvf%d", fn, rep->vport - 1);
if (ret >= len)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index be396e5e4e39..3dde5c7e0739 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1295,8 +1295,14 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
skb->protocol = *((__be16 *)(skb->data));
- skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+ if (netdev->features & NETIF_F_RXCSUM) {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+ stats->csum_complete++;
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ stats->csum_none++;
+ }
if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
skb_hwtstamps(skb)->hwtstamp =
@@ -1315,7 +1321,6 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
skb->dev = netdev;
- stats->csum_complete++;
stats->packets++;
stats->bytes += cqe_bcnt;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index d0b28251abf2..ecd2c747f726 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1931,7 +1931,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
u64 node_guid;
int err = 0;
- if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
+ if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
return -EPERM;
if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
return -EINVAL;
@@ -2005,7 +2005,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
{
struct mlx5_vport *evport;
- if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
+ if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
return -EPERM;
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
@@ -2297,19 +2297,24 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
u32 max_rate, u32 min_rate)
{
- u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
- bool min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
- fw_max_bw_share >= MLX5_MIN_BW_SHARE;
- bool max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
struct mlx5_vport *evport;
+ u32 fw_max_bw_share;
u32 previous_min_rate;
u32 divider;
+ bool min_rate_supported;
+ bool max_rate_supported;
int err = 0;
if (!ESW_ALLOWED(esw))
return -EPERM;
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
+
+ fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
+ min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
+ fw_max_bw_share >= MLX5_MIN_BW_SHARE;
+ max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
+
if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index f2cfa012315e..0be3eb86dd84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -263,10 +263,11 @@ static void nested_down_write_ref_node(struct fs_node *node,
}
}
-static void down_write_ref_node(struct fs_node *node)
+static void down_write_ref_node(struct fs_node *node, bool locked)
{
if (node) {
- down_write(&node->lock);
+ if (!locked)
+ down_write(&node->lock);
refcount_inc(&node->refcount);
}
}
@@ -277,13 +278,14 @@ static void up_read_ref_node(struct fs_node *node)
up_read(&node->lock);
}
-static void up_write_ref_node(struct fs_node *node)
+static void up_write_ref_node(struct fs_node *node, bool locked)
{
refcount_dec(&node->refcount);
- up_write(&node->lock);
+ if (!locked)
+ up_write(&node->lock);
}
-static void tree_put_node(struct fs_node *node)
+static void tree_put_node(struct fs_node *node, bool locked)
{
struct fs_node *parent_node = node->parent;
@@ -294,27 +296,27 @@ static void tree_put_node(struct fs_node *node)
/* Only root namespace doesn't have parent and we just
* need to free its node.
*/
- down_write_ref_node(parent_node);
+ down_write_ref_node(parent_node, locked);
list_del_init(&node->list);
if (node->del_sw_func)
node->del_sw_func(node);
- up_write_ref_node(parent_node);
+ up_write_ref_node(parent_node, locked);
} else {
kfree(node);
}
node = NULL;
}
if (!node && parent_node)
- tree_put_node(parent_node);
+ tree_put_node(parent_node, locked);
}
-static int tree_remove_node(struct fs_node *node)
+static int tree_remove_node(struct fs_node *node, bool locked)
{
if (refcount_read(&node->refcount) > 1) {
refcount_dec(&node->refcount);
return -EEXIST;
}
- tree_put_node(node);
+ tree_put_node(node, locked);
return 0;
}
@@ -420,22 +422,34 @@ static void del_sw_flow_table(struct fs_node *node)
kfree(ft);
}
-static void del_sw_hw_rule(struct fs_node *node)
+static void modify_fte(struct fs_fte *fte)
{
struct mlx5_flow_root_namespace *root;
- struct mlx5_flow_rule *rule;
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
- struct fs_fte *fte;
- int modify_mask;
- struct mlx5_core_dev *dev = get_dev(node);
+ struct mlx5_core_dev *dev;
int err;
- bool update_fte = false;
- fs_get_obj(rule, node);
- fs_get_obj(fte, rule->node.parent);
fs_get_obj(fg, fte->node.parent);
fs_get_obj(ft, fg->node.parent);
+ dev = get_dev(&fte->node);
+
+ root = find_root(&ft->node);
+ err = root->cmds->update_fte(dev, ft, fg->id, fte->modify_mask, fte);
+ if (err)
+ mlx5_core_warn(dev,
+ "%s can't del rule fg id=%d fte_index=%d\n",
+ __func__, fg->id, fte->index);
+ fte->modify_mask = 0;
+}
+
+static void del_sw_hw_rule(struct fs_node *node)
+{
+ struct mlx5_flow_rule *rule;
+ struct fs_fte *fte;
+
+ fs_get_obj(rule, node);
+ fs_get_obj(fte, rule->node.parent);
trace_mlx5_fs_del_rule(rule);
if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
mutex_lock(&rule->dest_attr.ft->lock);
@@ -445,27 +459,19 @@ static void del_sw_hw_rule(struct fs_node *node)
if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
--fte->dests_size) {
- modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
- BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
+ fte->modify_mask |=
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
- update_fte = true;
goto out;
}
if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
--fte->dests_size) {
- modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
- update_fte = true;
+ fte->modify_mask |=
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
}
out:
- root = find_root(&ft->node);
- if (update_fte && fte->dests_size) {
- err = root->cmds->update_fte(dev, ft, fg->id, modify_mask, fte);
- if (err)
- mlx5_core_warn(dev,
- "%s can't del rule fg id=%d fte_index=%d\n",
- __func__, fg->id, fte->index);
- }
kfree(rule);
}
@@ -491,6 +497,7 @@ static void del_hw_fte(struct fs_node *node)
mlx5_core_warn(dev,
"flow steering can't delete fte in index %d of flow group id %d\n",
fte->index, fg->id);
+ node->active = 0;
}
}
@@ -591,7 +598,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
fte->node.type = FS_TYPE_FLOW_ENTRY;
fte->action = *flow_act;
- tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
+ tree_init_node(&fte->node, NULL, del_sw_fte);
return fte;
}
@@ -858,7 +865,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
fs_get_obj(fte, rule->node.parent);
if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return -EINVAL;
- down_write_ref_node(&fte->node);
+ down_write_ref_node(&fte->node, false);
fs_get_obj(fg, fte->node.parent);
fs_get_obj(ft, fg->node.parent);
@@ -866,7 +873,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
root = find_root(&ft->node);
err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
modify_mask, fte);
- up_write_ref_node(&fte->node);
+ up_write_ref_node(&fte->node, false);
return err;
}
@@ -1016,11 +1023,11 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
if (err)
goto destroy_ft;
ft->node.active = true;
- down_write_ref_node(&fs_prio->node);
+ down_write_ref_node(&fs_prio->node, false);
tree_add_node(&ft->node, &fs_prio->node);
list_add_flow_table(ft, fs_prio);
fs_prio->num_ft++;
- up_write_ref_node(&fs_prio->node);
+ up_write_ref_node(&fs_prio->node, false);
mutex_unlock(&root->chain_lock);
trace_mlx5_fs_add_ft(ft);
return ft;
@@ -1114,17 +1121,17 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
if (ft->autogroup.active)
return ERR_PTR(-EPERM);
- down_write_ref_node(&ft->node);
+ down_write_ref_node(&ft->node, false);
fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
start_index, end_index,
ft->node.children.prev);
- up_write_ref_node(&ft->node);
+ up_write_ref_node(&ft->node, false);
if (IS_ERR(fg))
return fg;
err = root->cmds->create_flow_group(dev, ft, fg_in, &fg->id);
if (err) {
- tree_put_node(&fg->node);
+ tree_put_node(&fg->node, false);
return ERR_PTR(err);
}
trace_mlx5_fs_add_fg(fg);
@@ -1521,10 +1528,10 @@ static void free_match_list(struct match_list_head *head)
struct match_list *iter, *match_tmp;
list_del(&head->first.list);
- tree_put_node(&head->first.g->node);
+ tree_put_node(&head->first.g->node, false);
list_for_each_entry_safe(iter, match_tmp, &head->list,
list) {
- tree_put_node(&iter->g->node);
+ tree_put_node(&iter->g->node, false);
list_del(&iter->list);
kfree(iter);
}
@@ -1601,11 +1608,16 @@ lookup_fte_locked(struct mlx5_flow_group *g,
fte_tmp = NULL;
goto out;
}
+ if (!fte_tmp->node.active) {
+ tree_put_node(&fte_tmp->node, false);
+ fte_tmp = NULL;
+ goto out;
+ }
nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
out:
if (take_write)
- up_write_ref_node(&g->node);
+ up_write_ref_node(&g->node, false);
else
up_read_ref_node(&g->node);
return fte_tmp;
@@ -1647,8 +1659,8 @@ search_again_locked:
continue;
rule = add_rule_fg(g, spec->match_value,
flow_act, dest, dest_num, fte_tmp);
- up_write_ref_node(&fte_tmp->node);
- tree_put_node(&fte_tmp->node);
+ up_write_ref_node(&fte_tmp->node, false);
+ tree_put_node(&fte_tmp->node, false);
kmem_cache_free(steering->ftes_cache, fte);
return rule;
}
@@ -1684,7 +1696,7 @@ skip_search:
err = insert_fte(g, fte);
if (err) {
- up_write_ref_node(&g->node);
+ up_write_ref_node(&g->node, false);
if (err == -ENOSPC)
continue;
kmem_cache_free(steering->ftes_cache, fte);
@@ -1692,11 +1704,11 @@ skip_search:
}
nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
- up_write_ref_node(&g->node);
+ up_write_ref_node(&g->node, false);
rule = add_rule_fg(g, spec->match_value,
flow_act, dest, dest_num, fte);
- up_write_ref_node(&fte->node);
- tree_put_node(&fte->node);
+ up_write_ref_node(&fte->node, false);
+ tree_put_node(&fte->node, false);
return rule;
}
rule = ERR_PTR(-ENOENT);
@@ -1738,7 +1750,7 @@ search_again_locked:
err = build_match_list(&match_head, ft, spec);
if (err) {
if (take_write)
- up_write_ref_node(&ft->node);
+ up_write_ref_node(&ft->node, false);
else
up_read_ref_node(&ft->node);
return ERR_PTR(err);
@@ -1753,7 +1765,7 @@ search_again_locked:
if (!IS_ERR(rule) ||
(PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
if (take_write)
- up_write_ref_node(&ft->node);
+ up_write_ref_node(&ft->node, false);
return rule;
}
@@ -1769,12 +1781,12 @@ search_again_locked:
g = alloc_auto_flow_group(ft, spec);
if (IS_ERR(g)) {
rule = ERR_CAST(g);
- up_write_ref_node(&ft->node);
+ up_write_ref_node(&ft->node, false);
return rule;
}
nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
- up_write_ref_node(&ft->node);
+ up_write_ref_node(&ft->node, false);
err = create_auto_flow_group(ft, g);
if (err)
@@ -1793,17 +1805,17 @@ search_again_locked:
}
nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
- up_write_ref_node(&g->node);
+ up_write_ref_node(&g->node, false);
rule = add_rule_fg(g, spec->match_value, flow_act, dest,
dest_num, fte);
- up_write_ref_node(&fte->node);
- tree_put_node(&fte->node);
- tree_put_node(&g->node);
+ up_write_ref_node(&fte->node, false);
+ tree_put_node(&fte->node, false);
+ tree_put_node(&g->node, false);
return rule;
err_release_fg:
- up_write_ref_node(&g->node);
- tree_put_node(&g->node);
+ up_write_ref_node(&g->node, false);
+ tree_put_node(&g->node, false);
return ERR_PTR(err);
}
@@ -1866,10 +1878,33 @@ EXPORT_SYMBOL(mlx5_add_flow_rules);
void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
{
+ struct fs_fte *fte;
int i;
+ /* In order to consolidate the HW changes we lock the FTE for other
+ * changes, and increase its refcount so that the FTE's "del"
+ * callbacks are not invoked while the rules are removed; they are
+ * handled here instead. The rules are removed under the locked FTE.
+ * After removing all of the handle's rules, if rules remain in the
+ * FTE, we only need to modify the FTE in FW, then unlock it and
+ * drop the refcount taken above. Otherwise the FTE should be
+ * deleted: first delete it in FW, then unlock it and call
+ * tree_put_node() on it, which performs the final refcount decrease
+ * and any required handling of its parent.
+ */
+ fs_get_obj(fte, handle->rule[0]->node.parent);
+ down_write_ref_node(&fte->node, false);
for (i = handle->num_rules - 1; i >= 0; i--)
- tree_remove_node(&handle->rule[i]->node);
+ tree_remove_node(&handle->rule[i]->node, true);
+ if (fte->modify_mask && fte->dests_size) {
+ modify_fte(fte);
+ up_write_ref_node(&fte->node, false);
+ } else {
+ del_hw_fte(&fte->node);
+ up_write(&fte->node.lock);
+ tree_put_node(&fte->node, false);
+ }
kfree(handle);
}
EXPORT_SYMBOL(mlx5_del_flow_rules);
@@ -1972,7 +2007,7 @@ int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
mutex_unlock(&root->chain_lock);
return err;
}
- if (tree_remove_node(&ft->node))
+ if (tree_remove_node(&ft->node, false))
mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
ft->id);
mutex_unlock(&root->chain_lock);
@@ -1983,7 +2018,7 @@ EXPORT_SYMBOL(mlx5_destroy_flow_table);
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
{
- if (tree_remove_node(&fg->node))
+ if (tree_remove_node(&fg->node, false))
mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
fg->id);
}
@@ -2367,8 +2402,8 @@ static void clean_tree(struct fs_node *node)
tree_get_node(node);
list_for_each_entry_safe(iter, temp, &node->children, list)
clean_tree(iter);
- tree_put_node(node);
- tree_remove_node(node);
+ tree_put_node(node, false);
+ tree_remove_node(node, false);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 2dc86347af58..87de0e4d9124 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -172,6 +172,7 @@ struct fs_fte {
enum fs_fte_status status;
struct mlx5_fc *counter;
struct rhash_head hash;
+ int modify_mask;
};
/* Type of children is mlx5_flow_table/namespace */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 48aa6e030bcf..959605559858 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -595,27 +595,6 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
err);
}
-int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num)
-{
- struct mlx5_lag *ldev;
- int n;
-
- ldev = mlx5_lag_dev_get(dev);
- if (!ldev) {
- mlx5_core_warn(dev, "no lag device, can't get pf num\n");
- return -EINVAL;
- }
-
- for (n = 0; n < MLX5_MAX_PORTS; n++)
- if (ldev->pf[n].dev == dev) {
- *pf_num = n;
- return 0;
- }
-
- mlx5_core_warn(dev, "wasn't able to locate pf in the lag device\n");
- return -EINVAL;
-}
-
/* Must be called with intf_mutex held */
void mlx5_lag_remove(struct mlx5_core_dev *dev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 9529cf9623e3..7b331674622c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -188,8 +188,6 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
MLX5_CAP_GEN(dev, lag_master);
}
-int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num);
-
void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
void mlx5_lag_update(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 0b85c7252f9e..472f63f9fac5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -111,7 +111,6 @@ struct mlxsw_thermal {
struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
enum thermal_device_mode mode;
struct mlxsw_thermal_module *tz_module_arr;
- unsigned int tz_module_num;
};
static inline u8 mlxsw_state_to_duty(int state)
@@ -711,6 +710,9 @@ mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
module_tz = &thermal->tz_module_arr[module];
+ /* Skip if parent is already set (case of port split). */
+ if (module_tz->parent)
+ return 0;
module_tz->module = module;
module_tz->parent = thermal;
memcpy(module_tz->trips, default_thermal_trips,
@@ -718,13 +720,7 @@ mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
/* Initialize all trip point. */
mlxsw_thermal_module_trips_reset(module_tz);
/* Update trip point according to the module data. */
- err = mlxsw_thermal_module_trips_update(dev, core, module_tz);
- if (err)
- return err;
-
- thermal->tz_module_num++;
-
- return 0;
+ return mlxsw_thermal_module_trips_update(dev, core, module_tz);
}
static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz)
@@ -732,6 +728,7 @@ static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz)
if (module_tz && module_tz->tzdev) {
mlxsw_thermal_module_tz_fini(module_tz->tzdev);
module_tz->tzdev = NULL;
+ module_tz->parent = NULL;
}
}
@@ -740,6 +737,7 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
struct mlxsw_thermal *thermal)
{
unsigned int module_count = mlxsw_core_max_ports(core);
+ struct mlxsw_thermal_module *module_tz;
int i, err;
thermal->tz_module_arr = kcalloc(module_count,
@@ -754,8 +752,11 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
goto err_unreg_tz_module_arr;
}
- for (i = 0; i < thermal->tz_module_num; i++) {
- err = mlxsw_thermal_module_tz_init(&thermal->tz_module_arr[i]);
+ for (i = 0; i < module_count - 1; i++) {
+ module_tz = &thermal->tz_module_arr[i];
+ if (!module_tz->parent)
+ continue;
+ err = mlxsw_thermal_module_tz_init(module_tz);
if (err)
goto err_unreg_tz_module_arr;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 68bee9572a1b..00c390024350 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -34,6 +34,18 @@ struct mlxsw_m_port {
u8 module;
};
+static int mlxsw_m_base_mac_get(struct mlxsw_m *mlxsw_m)
+{
+ char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
+ int err;
+
+ err = mlxsw_reg_query(mlxsw_m->core, MLXSW_REG(spad), spad_pl);
+ if (err)
+ return err;
+ mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_m->base_mac);
+ return 0;
+}
+
static int mlxsw_m_port_dummy_open_stop(struct net_device *dev)
{
return 0;
@@ -314,6 +326,12 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
mlxsw_m->core = mlxsw_core;
mlxsw_m->bus_info = mlxsw_bus_info;
+ err = mlxsw_m_base_mac_get(mlxsw_m);
+ if (err) {
+ dev_err(mlxsw_m->bus_info->dev, "Failed to get base mac\n");
+ return err;
+ }
+
err = mlxsw_m_ports_create(mlxsw_m);
if (err) {
dev_err(mlxsw_m->bus_info->dev, "Failed to create ports\n");
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 4d1b4a24907f..13e6bf13ac4d 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -585,8 +585,7 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
if (adapter->csr.flags &
LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
- flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
- LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
+ flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
@@ -599,12 +598,6 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
/* map TX interrupt to vector */
int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);
- if (flags &
- LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
- int_vec_en_auto_clr |= INT_VEC_EN_(vector);
- lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
- int_vec_en_auto_clr);
- }
/* Remove TX interrupt from shared mask */
intr->vector_list[0].int_mask &= ~int_bit;
@@ -1902,7 +1895,17 @@ static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
return ((++index) % rx->ring_size);
}
-static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index)
+static struct sk_buff *lan743x_rx_allocate_skb(struct lan743x_rx *rx)
+{
+ int length = 0;
+
+ length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
+ return __netdev_alloc_skb(rx->adapter->netdev,
+ length, GFP_ATOMIC | GFP_DMA);
+}
+
+static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
+ struct sk_buff *skb)
{
struct lan743x_rx_buffer_info *buffer_info;
struct lan743x_rx_descriptor *descriptor;
@@ -1911,9 +1914,7 @@ static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index)
length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
descriptor = &rx->ring_cpu_ptr[index];
buffer_info = &rx->buffer_info[index];
- buffer_info->skb = __netdev_alloc_skb(rx->adapter->netdev,
- length,
- GFP_ATOMIC | GFP_DMA);
+ buffer_info->skb = skb;
if (!(buffer_info->skb))
return -ENOMEM;
buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev,
@@ -2060,8 +2061,19 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
/* packet is available */
if (first_index == last_index) {
/* single buffer packet */
+ struct sk_buff *new_skb = NULL;
int packet_length;
+ new_skb = lan743x_rx_allocate_skb(rx);
+ if (!new_skb) {
+ /* failed to allocate next skb.
+ * Memory is very low.
+ * Drop this packet and reuse buffer.
+ */
+ lan743x_rx_reuse_ring_element(rx, first_index);
+ goto process_extension;
+ }
+
buffer_info = &rx->buffer_info[first_index];
skb = buffer_info->skb;
descriptor = &rx->ring_cpu_ptr[first_index];
@@ -2081,7 +2093,7 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
skb_put(skb, packet_length - 4);
skb->protocol = eth_type_trans(skb,
rx->adapter->netdev);
- lan743x_rx_allocate_ring_element(rx, first_index);
+ lan743x_rx_init_ring_element(rx, first_index, new_skb);
} else {
int index = first_index;
@@ -2094,26 +2106,23 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
if (first_index <= last_index) {
while ((index >= first_index) &&
(index <= last_index)) {
- lan743x_rx_release_ring_element(rx,
- index);
- lan743x_rx_allocate_ring_element(rx,
- index);
+ lan743x_rx_reuse_ring_element(rx,
+ index);
index = lan743x_rx_next_index(rx,
index);
}
} else {
while ((index >= first_index) ||
(index <= last_index)) {
- lan743x_rx_release_ring_element(rx,
- index);
- lan743x_rx_allocate_ring_element(rx,
- index);
+ lan743x_rx_reuse_ring_element(rx,
+ index);
index = lan743x_rx_next_index(rx,
index);
}
}
}
+process_extension:
if (extension_index >= 0) {
descriptor = &rx->ring_cpu_ptr[extension_index];
buffer_info = &rx->buffer_info[extension_index];
@@ -2290,7 +2299,9 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
rx->last_head = 0;
for (index = 0; index < rx->ring_size; index++) {
- ret = lan743x_rx_allocate_ring_element(rx, index);
+ struct sk_buff *new_skb = lan743x_rx_allocate_skb(rx);
+
+ ret = lan743x_rx_init_ring_element(rx, index, new_skb);
if (ret)
goto cleanup;
}
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 10b075bc5959..b61b88cbc0c7 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3886,6 +3886,12 @@ static int ql3xxx_probe(struct pci_dev *pdev,
netif_stop_queue(ndev);
qdev->workqueue = create_singlethread_workqueue(ndev->name);
+ if (!qdev->workqueue) {
+ unregister_netdev(ndev);
+ err = -ENOMEM;
+ goto err_out_iounmap;
+ }
+
INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 096515c27263..07e1c623048e 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4681,6 +4681,11 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
*/
qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
ndev->name);
+ if (!qdev->workqueue) {
+ err = -ENOMEM;
+ goto err_out2;
+ }
+
INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 339b2eae2100..e33af371b169 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3181,12 +3181,16 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
struct device_node *np = dev->of_node;
struct sh_eth_plat_data *pdata;
const char *mac_addr;
+ int ret;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
- pdata->phy_interface = of_get_phy_mode(np);
+ ret = of_get_phy_mode(np);
+ if (ret < 0)
+ return NULL;
+ pdata->phy_interface = ret;
mac_addr = of_get_mac_address(np);
if (mac_addr)
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index c883aa89b7ca..a71c900ca04f 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2805,6 +2805,11 @@ static int rocker_switchdev_event(struct notifier_block *unused,
memcpy(&switchdev_work->fdb_info, ptr,
sizeof(switchdev_work->fdb_info));
switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (unlikely(!switchdev_work->fdb_info.addr)) {
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+ }
+
ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
fdb_info->addr);
/* Take a reference on the rocker device */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 0f660af01a4b..195669f550f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -1147,7 +1147,10 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
return ret;
}
- plat_dat->interface = of_get_phy_mode(dev->of_node);
+ ret = of_get_phy_mode(dev->of_node);
+ if (ret < 0)
+ return -EINVAL;
+ plat_dat->interface = ret;
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers.
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 8f09edd811e9..50c60550f295 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -532,6 +532,7 @@ static void pptp_sock_destruct(struct sock *sk)
pppox_unbind_sock(sk);
}
skb_queue_purge(&sk->sk_receive_queue);
+ dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
}
static int pptp_create(struct net *net, struct socket *sock, int kern)
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index b123b0dcf274..4671776f5623 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -541,9 +541,9 @@ static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
static int btt_freelist_init(struct arena_info *arena)
{
- int old, new, ret;
- u32 i, map_entry;
- struct log_entry log_new, log_old;
+ int new, ret;
+ struct log_entry log_new;
+ u32 i, map_entry, log_oldmap, log_newmap;
arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
GFP_KERNEL);
@@ -551,24 +551,26 @@ static int btt_freelist_init(struct arena_info *arena)
return -ENOMEM;
for (i = 0; i < arena->nfree; i++) {
- old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT);
- if (old < 0)
- return old;
-
new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
if (new < 0)
return new;
+ /* old and new map entries with any flags stripped out */
+ log_oldmap = ent_lba(le32_to_cpu(log_new.old_map));
+ log_newmap = ent_lba(le32_to_cpu(log_new.new_map));
+
/* sub points to the next one to be overwritten */
arena->freelist[i].sub = 1 - new;
arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
- arena->freelist[i].block = le32_to_cpu(log_new.old_map);
+ arena->freelist[i].block = log_oldmap;
/*
* FIXME: if error clearing fails during init, we want to make
* the BTT read-only
*/
- if (ent_e_flag(log_new.old_map)) {
+ if (ent_e_flag(log_new.old_map) &&
+ !ent_normal(log_new.old_map)) {
+ arena->freelist[i].has_err = 1;
ret = arena_clear_freelist_error(arena, i);
if (ret)
dev_err_ratelimited(to_dev(arena),
@@ -576,7 +578,7 @@ static int btt_freelist_init(struct arena_info *arena)
}
/* This implies a newly created or untouched flog entry */
- if (log_new.old_map == log_new.new_map)
+ if (log_oldmap == log_newmap)
continue;
/* Check if map recovery is needed */
@@ -584,8 +586,15 @@ static int btt_freelist_init(struct arena_info *arena)
NULL, NULL, 0);
if (ret)
return ret;
- if ((le32_to_cpu(log_new.new_map) != map_entry) &&
- (le32_to_cpu(log_new.old_map) == map_entry)) {
+
+ /*
+ * The map_entry from btt_read_map is stripped of any flag bits,
+ * so use the stripped out versions from the log as well for
+ * testing whether recovery is needed. For restoration, use the
+ * 'raw' version of the log entries as that captured what we
+ * were going to write originally.
+ */
+ if ((log_newmap != map_entry) && (log_oldmap == map_entry)) {
/*
* Last transaction wrote the flog, but wasn't able
* to complete the map write. So fix up the map.
diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h
index db3cb6d4d0d4..ddff49c707b0 100644
--- a/drivers/nvdimm/btt.h
+++ b/drivers/nvdimm/btt.h
@@ -44,6 +44,8 @@
#define ent_e_flag(ent) (!!(ent & MAP_ERR_MASK))
#define ent_z_flag(ent) (!!(ent & MAP_TRIM_MASK))
#define set_e_flag(ent) (ent |= MAP_ERR_MASK)
+/* 'normal' is both e and z flags set */
+#define ent_normal(ent) (ent_e_flag(ent) && ent_z_flag(ent))
enum btt_init_state {
INIT_UNCHECKED = 0,
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 795ad4ff35ca..b72a303176c7 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -159,11 +159,19 @@ static ssize_t size_show(struct device *dev,
}
static DEVICE_ATTR_RO(size);
+static ssize_t log_zero_flags_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Y\n");
+}
+static DEVICE_ATTR_RO(log_zero_flags);
+
static struct attribute *nd_btt_attributes[] = {
&dev_attr_sector_size.attr,
&dev_attr_namespace.attr,
&dev_attr_uuid.attr,
&dev_attr_size.attr,
+ &dev_attr_log_zero_flags.attr,
NULL,
};
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index efe412a6b5b9..91b9abbf689c 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -11,6 +11,7 @@
* General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
@@ -25,6 +26,10 @@
static DEFINE_IDA(dimm_ida);
+static bool noblk;
+module_param(noblk, bool, 0444);
+MODULE_PARM_DESC(noblk, "force disable BLK / local alias support");
+
/*
* Retrieve bus and dimm handle and return if this bus supports
* get_config_data commands
@@ -551,6 +556,8 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
nvdimm->dimm_id = dimm_id;
nvdimm->provider_data = provider_data;
+ if (noblk)
+ flags |= 1 << NDD_NOBLK;
nvdimm->flags = flags;
nvdimm->cmd_mask = cmd_mask;
nvdimm->num_flush = num_flush;
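Because the new noblk parameter is declared with mode 0444 it is read-only at runtime and has to be supplied at module load time, or on the kernel command line for built-in configurations. A hypothetical invocation, assuming the parameter lands in the usual libnvdimm module:

    modprobe libnvdimm noblk=1        # module build
    libnvdimm.noblk=1                 # kernel command line, built-in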
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index a11bf4e6b451..f3d753d3169c 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -392,6 +392,7 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
return 0; /* no label, nothing to reserve */
for_each_clear_bit_le(slot, free, nslot) {
+ struct nvdimm *nvdimm = to_nvdimm(ndd->dev);
struct nd_namespace_label *nd_label;
struct nd_region *nd_region = NULL;
u8 label_uuid[NSLABEL_UUID_LEN];
@@ -406,6 +407,8 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
flags = __le32_to_cpu(nd_label->flags);
+ if (test_bit(NDD_NOBLK, &nvdimm->flags))
+ flags &= ~NSLABEL_FLAG_LOCAL;
nd_label_gen_id(&label_id, label_uuid, flags);
res = nvdimm_allocate_dpa(ndd, &label_id,
__le64_to_cpu(nd_label->dpa),
@@ -755,7 +758,7 @@ static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
static int __pmem_label_update(struct nd_region *nd_region,
struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
- int pos)
+ int pos, unsigned long flags)
{
struct nd_namespace_common *ndns = &nspm->nsio.common;
struct nd_interleave_set *nd_set = nd_region->nd_set;
@@ -796,7 +799,7 @@ static int __pmem_label_update(struct nd_region *nd_region,
memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
if (nspm->alt_name)
memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
- nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_UPDATING);
+ nd_label->flags = __cpu_to_le32(flags);
nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
nd_label->position = __cpu_to_le16(pos);
nd_label->isetcookie = __cpu_to_le64(cookie);
@@ -1249,13 +1252,13 @@ static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
int nd_pmem_namespace_label_update(struct nd_region *nd_region,
struct nd_namespace_pmem *nspm, resource_size_t size)
{
- int i;
+ int i, rc;
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct resource *res;
- int rc, count = 0;
+ int count = 0;
if (size == 0) {
rc = del_labels(nd_mapping, nspm->uuid);
@@ -1273,7 +1276,20 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region,
if (rc < 0)
return rc;
- rc = __pmem_label_update(nd_region, nd_mapping, nspm, i);
+ rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
+ NSLABEL_FLAG_UPDATING);
+ if (rc)
+ return rc;
+ }
+
+ if (size == 0)
+ return 0;
+
+ /* Clear the UPDATING flag per UEFI 2.7 expectations */
+ for (i = 0; i < nd_region->ndr_mappings; i++) {
+ struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+
+ rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
if (rc)
return rc;
}
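The hunk above turns the namespace label write into two passes: every mapping is first written with NSLABEL_FLAG_UPDATING set, and only after all mappings carry a consistent label is the flag cleared, matching the UEFI 2.7 expectation that UPDATING does not survive a completed update. A condensed sketch of that control flow (for_each_mapping() is shorthand here, not a real helper):

    for_each_mapping(i)   /* pass 1: mark every label as updating */
            __pmem_label_update(nd_region, &nd_region->mapping[i], nspm, i,
                                NSLABEL_FLAG_UPDATING);
    for_each_mapping(i)   /* pass 2: rewrite with the flag cleared */
            __pmem_label_update(nd_region, &nd_region->mapping[i], nspm, i, 0);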
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 4b077555ac70..7849bf1812c4 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -138,6 +138,7 @@ bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
bool pmem_should_map_pages(struct device *dev)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
+ struct nd_namespace_common *ndns = to_ndns(dev);
struct nd_namespace_io *nsio;
if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
@@ -149,6 +150,9 @@ bool pmem_should_map_pages(struct device *dev)
if (is_nd_pfn(dev) || is_nd_btt(dev))
return false;
+ if (ndns->force_raw)
+ return false;
+
nsio = to_nd_namespace_io(dev);
if (region_intersects(nsio->res.start, resource_size(&nsio->res),
IORESOURCE_SYSTEM_RAM,
@@ -1506,13 +1510,13 @@ static ssize_t __holder_class_store(struct device *dev, const char *buf)
if (dev->driver || ndns->claim)
return -EBUSY;
- if (strcmp(buf, "btt") == 0 || strcmp(buf, "btt\n") == 0)
+ if (sysfs_streq(buf, "btt"))
ndns->claim_class = btt_claim_class(dev);
- else if (strcmp(buf, "pfn") == 0 || strcmp(buf, "pfn\n") == 0)
+ else if (sysfs_streq(buf, "pfn"))
ndns->claim_class = NVDIMM_CCLASS_PFN;
- else if (strcmp(buf, "dax") == 0 || strcmp(buf, "dax\n") == 0)
+ else if (sysfs_streq(buf, "dax"))
ndns->claim_class = NVDIMM_CCLASS_DAX;
- else if (strcmp(buf, "") == 0 || strcmp(buf, "\n") == 0)
+ else if (sysfs_streq(buf, ""))
ndns->claim_class = NVDIMM_CCLASS_NONE;
else
return -EINVAL;
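sysfs_streq() exists for exactly this pattern: it compares two strings but tolerates a single trailing newline, which is what echo-driven sysfs writes usually carry, so each duplicated strcmp() pair collapses into one call. A small illustration:

    sysfs_streq("btt",   "btt");   /* true                           */
    sysfs_streq("btt\n", "btt");   /* true, trailing newline ignored */
    sysfs_streq("btt2",  "btt");   /* false                          */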
@@ -2492,6 +2496,12 @@ static int init_active_labels(struct nd_region *nd_region)
if (!label_ent)
break;
label = nd_label_active(ndd, j);
+ if (test_bit(NDD_NOBLK, &nvdimm->flags)) {
+ u32 flags = __le32_to_cpu(label->flags);
+
+ flags &= ~NSLABEL_FLAG_LOCAL;
+ label->flags = __cpu_to_le32(flags);
+ }
label_ent->label = label;
mutex_lock(&nd_mapping->lock);
diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
index 0a701837dfc0..11b9821eba85 100644
--- a/drivers/nvdimm/of_pmem.c
+++ b/drivers/nvdimm/of_pmem.c
@@ -108,7 +108,6 @@ static struct platform_driver of_pmem_region_driver = {
.remove = of_pmem_region_remove,
.driver = {
.name = "of_pmem",
- .owner = THIS_MODULE,
.of_match_table = of_pmem_region_match,
},
};
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 6f22272e8d80..d271bd731af7 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -580,6 +580,11 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
}
EXPORT_SYMBOL(nd_pfn_probe);
+static u32 info_block_reserve(void)
+{
+ return ALIGN(SZ_8K, PAGE_SIZE);
+}
+
/*
* We hotplug memory at section granularity, pad the reserved area from
* the previous section base to the namespace base address.
@@ -593,7 +598,7 @@ static unsigned long init_altmap_base(resource_size_t base)
static unsigned long init_altmap_reserve(resource_size_t base)
{
- unsigned long reserve = PHYS_PFN(SZ_8K);
+ unsigned long reserve = info_block_reserve() >> PAGE_SHIFT;
unsigned long base_pfn = PHYS_PFN(base);
reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
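info_block_reserve() rounds the 8K info-block area up to a whole number of pages, so the altmap reservation stays correct on architectures whose pages are larger than 8K. Worked values for the two common cases:

    /* ALIGN(SZ_8K, PAGE_SIZE) */
    /* 4K pages:  ALIGN(8192, 4096)  == 8192  -> reserve of two pages */
    /* 64K pages: ALIGN(8192, 65536) == 65536 -> reserve of one page  */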
@@ -608,6 +613,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
u64 offset = le64_to_cpu(pfn_sb->dataoff);
u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+ u32 reserve = info_block_reserve();
struct nd_namespace_common *ndns = nd_pfn->ndns;
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
resource_size_t base = nsio->res.start + start_pad;
@@ -621,7 +627,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
res->end -= end_trunc;
if (nd_pfn->mode == PFN_MODE_RAM) {
- if (offset < SZ_8K)
+ if (offset < reserve)
return -EINVAL;
nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
pgmap->altmap_valid = false;
@@ -634,7 +640,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
le64_to_cpu(nd_pfn->pfn_sb->npfns),
nd_pfn->npfns);
memcpy(altmap, &__altmap, sizeof(*altmap));
- altmap->free = PHYS_PFN(offset - SZ_8K);
+ altmap->free = PHYS_PFN(offset - reserve);
altmap->alloc = 0;
pgmap->altmap_valid = true;
} else
@@ -678,18 +684,17 @@ static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trun
if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
IORES_DESC_NONE) == REGION_MIXED
|| !IS_ALIGNED(end, nd_pfn->align)
- || nd_region_conflict(nd_region, start, size + adjust))
+ || nd_region_conflict(nd_region, start, size))
*end_trunc = end - phys_pmem_align_down(nd_pfn, end);
}
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
- u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
struct nd_namespace_common *ndns = nd_pfn->ndns;
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+ u32 start_pad, end_trunc, reserve = info_block_reserve();
resource_size_t start, size;
struct nd_region *nd_region;
- u32 start_pad, end_trunc;
struct nd_pfn_sb *pfn_sb;
unsigned long npfns;
phys_addr_t offset;
@@ -734,7 +739,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
*/
start = nsio->res.start + start_pad;
size = resource_size(&nsio->res);
- npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K)
+ npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - reserve)
/ PAGE_SIZE);
if (nd_pfn->mode == PFN_MODE_PMEM) {
/*
@@ -742,11 +747,10 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
* when populating the vmemmap. This *should* be equal to
* PMD_SIZE for most architectures.
*/
- offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve,
+ offset = ALIGN(start + reserve + 64 * npfns,
max(nd_pfn->align, PMD_SIZE)) - start;
} else if (nd_pfn->mode == PFN_MODE_RAM)
- offset = ALIGN(start + SZ_8K + dax_label_reserve,
- nd_pfn->align) - start;
+ offset = ALIGN(start + reserve, nd_pfn->align) - start;
else
return -ENXIO;
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index e2818f94f292..3b58baa44b5c 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -1003,6 +1003,13 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
if (test_bit(NDD_UNARMED, &nvdimm->flags))
ro = 1;
+
+ if (test_bit(NDD_NOBLK, &nvdimm->flags)
+ && dev_type == &nd_blk_device_type) {
+ dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n",
+ caller, dev_name(&nvdimm->dev), i);
+ return NULL;
+ }
}
if (dev_type == &nd_blk_device_type) {
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index d7f97167cac3..0420f7e8ad5b 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -760,7 +760,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
old_freq, freq);
/* Scaling up? Configure required OPPs before frequency */
- if (freq > old_freq) {
+ if (freq >= old_freq) {
ret = _set_required_opps(dev, opp_table, opp);
if (ret)
goto put_opp;
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 62504b18f198..c10c782d15aa 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -173,7 +173,7 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
struct opp_table **required_opp_tables;
struct device **genpd_virt_devs = NULL;
struct device_node *required_np, *np;
- int count, i;
+ int count, count_pd, i;
/* Traversing the first OPP node is all we need */
np = of_get_next_available_child(opp_np, NULL);
@@ -186,7 +186,19 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
if (!count)
goto put_np;
- if (count > 1) {
+ /*
+ * Check the number of power-domains to know if we need to deal
+ * with virtual devices. In some cases we have devices with multiple
+ * power domains but with only one of them being scalable, hence
+ * 'count' could be 1, but we still have to deal with multiple genpds
+ * and virtual devices.
+ */
+ count_pd = of_count_phandle_with_args(dev->of_node, "power-domains",
+ "#power-domain-cells");
+ if (!count_pd)
+ goto put_np;
+
+ if (count_pd > 1) {
genpd_virt_devs = kcalloc(count, sizeof(*genpd_virt_devs),
GFP_KERNEL);
if (!genpd_virt_devs)
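The distinction drawn in the comment above is between the number of required-opps phandles in the OPP table and the number of power-domains attached to the device; only the latter decides whether virtual genpd devices are needed. Illustrative values for a device with two power domains of which only one is scalable (numbers are made up, not taken from a real devicetree):

    count    = of_count_phandle_with_args(np, "required-opps", NULL);      /* 1 */
    count_pd = of_count_phandle_with_args(dev->of_node, "power-domains",
                                          "#power-domain-cells");          /* 2 */
    /* count_pd > 1, so genpd_virt_devs is still allocated */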
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index a8f47df0655a..54f8238aac0d 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -192,14 +192,23 @@ config PWM_IMG
To compile this driver as a module, choose M here: the module
will be called pwm-img
-config PWM_IMX
- tristate "i.MX PWM support"
+config PWM_IMX1
+ tristate "i.MX1 PWM support"
depends on ARCH_MXC
help
- Generic PWM framework driver for i.MX.
+ Generic PWM framework driver for i.MX1 and i.MX21.
To compile this driver as a module, choose M here: the module
- will be called pwm-imx.
+ will be called pwm-imx1.
+
+config PWM_IMX27
+ tristate "i.MX27 PWM support"
+ depends on ARCH_MXC
+ help
+ Generic PWM framework driver for i.MX27 and later i.MX SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-imx27.
config PWM_JZ4740
tristate "Ingenic JZ47xx PWM support"
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 9c676a0dadf5..448825e892bc 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -17,7 +17,8 @@ obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o
obj-$(CONFIG_PWM_FSL_FTM) += pwm-fsl-ftm.o
obj-$(CONFIG_PWM_HIBVT) += pwm-hibvt.o
obj-$(CONFIG_PWM_IMG) += pwm-img.o
-obj-$(CONFIG_PWM_IMX) += pwm-imx.o
+obj-$(CONFIG_PWM_IMX1) += pwm-imx1.o
+obj-$(CONFIG_PWM_IMX27) += pwm-imx27.o
obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o
obj-$(CONFIG_PWM_LP3943) += pwm-lp3943.o
obj-$(CONFIG_PWM_LPC18XX_SCT) += pwm-lpc18xx-sct.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 1581f6ab1b1f..3149204567f3 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -472,7 +472,10 @@ int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state)
state->duty_cycle > state->period)
return -EINVAL;
- if (!memcmp(state, &pwm->state, sizeof(*state)))
+ if (state->period == pwm->state.period &&
+ state->duty_cycle == pwm->state.duty_cycle &&
+ state->polarity == pwm->state.polarity &&
+ state->enabled == pwm->state.enabled)
return 0;
if (pwm->chip->ops->apply) {
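Comparing the fields one by one avoids memcmp()'s dependence on the struct's padding bytes, which a caller-built pwm_state may leave uninitialized. A minimal sketch of the hazard (nothing here depends on pwm_state's actual layout):

    struct pwm_state state;              /* stack variable, padding undefined */
    state.period     = pwm->state.period;
    state.duty_cycle = pwm->state.duty_cycle;
    state.polarity   = pwm->state.polarity;
    state.enabled    = pwm->state.enabled;
    /* every field matches, yet memcmp(&state, &pwm->state, sizeof(state))
       can still be non-zero because of the padding bytes */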
@@ -1033,10 +1036,7 @@ static int pwm_seq_show(struct seq_file *s, void *v)
dev_name(chip->dev), chip->npwm,
(chip->npwm != 1) ? "s" : "");
- if (chip->ops->dbg_show)
- chip->ops->dbg_show(chip, s);
- else
- pwm_dbg_show(chip, s);
+ pwm_dbg_show(chip, s);
return 0;
}
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index 530d7dc5f1b5..a9fd6f0d408c 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -48,16 +48,6 @@
#define PWMV2_CPRD 0x0C
#define PWMV2_CPRDUPD 0x10
-/*
- * Max value for duty and period
- *
- * Although the duty and period register is 32 bit,
- * however only the LSB 16 bits are significant.
- */
-#define PWM_MAX_DTY 0xFFFF
-#define PWM_MAX_PRD 0xFFFF
-#define PRD_MAX_PRES 10
-
struct atmel_pwm_registers {
u8 period;
u8 period_upd;
@@ -65,11 +55,21 @@ struct atmel_pwm_registers {
u8 duty_upd;
};
+struct atmel_pwm_config {
+ u32 max_period;
+ u32 max_pres;
+};
+
+struct atmel_pwm_data {
+ struct atmel_pwm_registers regs;
+ struct atmel_pwm_config cfg;
+};
+
struct atmel_pwm_chip {
struct pwm_chip chip;
struct clk *clk;
void __iomem *base;
- const struct atmel_pwm_registers *regs;
+ const struct atmel_pwm_data *data;
unsigned int updated_pwms;
/* ISR is cleared when read, ensure only one thread does that */
@@ -121,10 +121,10 @@ static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip,
cycles *= clk_get_rate(atmel_pwm->clk);
do_div(cycles, NSEC_PER_SEC);
- for (*pres = 0; cycles > PWM_MAX_PRD; cycles >>= 1)
+ for (*pres = 0; cycles > atmel_pwm->data->cfg.max_period; cycles >>= 1)
(*pres)++;
- if (*pres > PRD_MAX_PRES) {
+ if (*pres > atmel_pwm->data->cfg.max_pres) {
dev_err(chip->dev, "pres exceeds the maximum value\n");
return -EINVAL;
}
@@ -150,15 +150,15 @@ static void atmel_pwm_update_cdty(struct pwm_chip *chip, struct pwm_device *pwm,
struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
u32 val;
- if (atmel_pwm->regs->duty_upd ==
- atmel_pwm->regs->period_upd) {
+ if (atmel_pwm->data->regs.duty_upd ==
+ atmel_pwm->data->regs.period_upd) {
val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR);
val &= ~PWM_CMR_UPD_CDTY;
atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val);
}
atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm,
- atmel_pwm->regs->duty_upd, cdty);
+ atmel_pwm->data->regs.duty_upd, cdty);
}
static void atmel_pwm_set_cprd_cdty(struct pwm_chip *chip,
@@ -168,9 +168,9 @@ static void atmel_pwm_set_cprd_cdty(struct pwm_chip *chip,
struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm,
- atmel_pwm->regs->duty, cdty);
+ atmel_pwm->data->regs.duty, cdty);
atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm,
- atmel_pwm->regs->period, cprd);
+ atmel_pwm->data->regs.period, cprd);
}
static void atmel_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -225,7 +225,7 @@ static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
cstate.polarity == state->polarity &&
cstate.period == state->period) {
cprd = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm,
- atmel_pwm->regs->period);
+ atmel_pwm->data->regs.period);
atmel_pwm_calculate_cdty(state, cprd, &cdty);
atmel_pwm_update_cdty(chip, pwm, cdty);
return 0;
@@ -277,27 +277,55 @@ static const struct pwm_ops atmel_pwm_ops = {
.owner = THIS_MODULE,
};
-static const struct atmel_pwm_registers atmel_pwm_regs_v1 = {
- .period = PWMV1_CPRD,
- .period_upd = PWMV1_CUPD,
- .duty = PWMV1_CDTY,
- .duty_upd = PWMV1_CUPD,
+static const struct atmel_pwm_data atmel_sam9rl_pwm_data = {
+ .regs = {
+ .period = PWMV1_CPRD,
+ .period_upd = PWMV1_CUPD,
+ .duty = PWMV1_CDTY,
+ .duty_upd = PWMV1_CUPD,
+ },
+ .cfg = {
+ /* 16 bits to keep period and duty. */
+ .max_period = 0xffff,
+ .max_pres = 10,
+ },
+};
+
+static const struct atmel_pwm_data atmel_sama5_pwm_data = {
+ .regs = {
+ .period = PWMV2_CPRD,
+ .period_upd = PWMV2_CPRDUPD,
+ .duty = PWMV2_CDTY,
+ .duty_upd = PWMV2_CDTYUPD,
+ },
+ .cfg = {
+ /* 16 bits to keep period and duty. */
+ .max_period = 0xffff,
+ .max_pres = 10,
+ },
};
-static const struct atmel_pwm_registers atmel_pwm_regs_v2 = {
- .period = PWMV2_CPRD,
- .period_upd = PWMV2_CPRDUPD,
- .duty = PWMV2_CDTY,
- .duty_upd = PWMV2_CDTYUPD,
+static const struct atmel_pwm_data mchp_sam9x60_pwm_data = {
+ .regs = {
+ .period = PWMV1_CPRD,
+ .period_upd = PWMV1_CUPD,
+ .duty = PWMV1_CDTY,
+ .duty_upd = PWMV1_CUPD,
+ },
+ .cfg = {
+ /* 32 bits to keep period and duty. */
+ .max_period = 0xffffffff,
+ .max_pres = 10,
+ },
};
static const struct platform_device_id atmel_pwm_devtypes[] = {
{
.name = "at91sam9rl-pwm",
- .driver_data = (kernel_ulong_t)&atmel_pwm_regs_v1,
+ .driver_data = (kernel_ulong_t)&atmel_sam9rl_pwm_data,
}, {
.name = "sama5d3-pwm",
- .driver_data = (kernel_ulong_t)&atmel_pwm_regs_v2,
+ .driver_data = (kernel_ulong_t)&atmel_sama5_pwm_data,
}, {
/* sentinel */
},
@@ -307,20 +335,23 @@ MODULE_DEVICE_TABLE(platform, atmel_pwm_devtypes);
static const struct of_device_id atmel_pwm_dt_ids[] = {
{
.compatible = "atmel,at91sam9rl-pwm",
- .data = &atmel_pwm_regs_v1,
+ .data = &atmel_sam9rl_pwm_data,
}, {
.compatible = "atmel,sama5d3-pwm",
- .data = &atmel_pwm_regs_v2,
+ .data = &atmel_sama5_pwm_data,
}, {
.compatible = "atmel,sama5d2-pwm",
- .data = &atmel_pwm_regs_v2,
+ .data = &atmel_sama5_pwm_data,
+ }, {
+ .compatible = "microchip,sam9x60-pwm",
+ .data = &mchp_sam9x60_pwm_data,
}, {
/* sentinel */
},
};
MODULE_DEVICE_TABLE(of, atmel_pwm_dt_ids);
-static inline const struct atmel_pwm_registers *
+static inline const struct atmel_pwm_data *
atmel_pwm_get_driver_data(struct platform_device *pdev)
{
const struct platform_device_id *id;
@@ -330,18 +361,18 @@ atmel_pwm_get_driver_data(struct platform_device *pdev)
id = platform_get_device_id(pdev);
- return (struct atmel_pwm_registers *)id->driver_data;
+ return (struct atmel_pwm_data *)id->driver_data;
}
static int atmel_pwm_probe(struct platform_device *pdev)
{
- const struct atmel_pwm_registers *regs;
+ const struct atmel_pwm_data *data;
struct atmel_pwm_chip *atmel_pwm;
struct resource *res;
int ret;
- regs = atmel_pwm_get_driver_data(pdev);
- if (!regs)
+ data = atmel_pwm_get_driver_data(pdev);
+ if (!data)
return -ENODEV;
atmel_pwm = devm_kzalloc(&pdev->dev, sizeof(*atmel_pwm), GFP_KERNEL);
@@ -373,7 +404,7 @@ static int atmel_pwm_probe(struct platform_device *pdev)
atmel_pwm->chip.base = -1;
atmel_pwm->chip.npwm = 4;
- atmel_pwm->regs = regs;
+ atmel_pwm->data = data;
atmel_pwm->updated_pwms = 0;
mutex_init(&atmel_pwm->isr_lock);
diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c
index 09a95aeb3a70..81da91df2529 100644
--- a/drivers/pwm/pwm-bcm-kona.c
+++ b/drivers/pwm/pwm-bcm-kona.c
@@ -45,25 +45,25 @@
* high or low depending on its state at that exact instant.
*/
-#define PWM_CONTROL_OFFSET (0x00000000)
+#define PWM_CONTROL_OFFSET 0x00000000
#define PWM_CONTROL_SMOOTH_SHIFT(chan) (24 + (chan))
#define PWM_CONTROL_TYPE_SHIFT(chan) (16 + (chan))
#define PWM_CONTROL_POLARITY_SHIFT(chan) (8 + (chan))
#define PWM_CONTROL_TRIGGER_SHIFT(chan) (chan)
-#define PRESCALE_OFFSET (0x00000004)
+#define PRESCALE_OFFSET 0x00000004
#define PRESCALE_SHIFT(chan) ((chan) << 2)
#define PRESCALE_MASK(chan) (0x7 << PRESCALE_SHIFT(chan))
-#define PRESCALE_MIN (0x00000000)
-#define PRESCALE_MAX (0x00000007)
+#define PRESCALE_MIN 0x00000000
+#define PRESCALE_MAX 0x00000007
#define PERIOD_COUNT_OFFSET(chan) (0x00000008 + ((chan) << 3))
-#define PERIOD_COUNT_MIN (0x00000002)
-#define PERIOD_COUNT_MAX (0x00ffffff)
+#define PERIOD_COUNT_MIN 0x00000002
+#define PERIOD_COUNT_MAX 0x00ffffff
#define DUTY_CYCLE_HIGH_OFFSET(chan) (0x0000000c + ((chan) << 3))
-#define DUTY_CYCLE_HIGH_MIN (0x00000000)
-#define DUTY_CYCLE_HIGH_MAX (0x00ffffff)
+#define DUTY_CYCLE_HIGH_MIN 0x00000000
+#define DUTY_CYCLE_HIGH_MAX 0x00ffffff
struct kona_pwmc {
struct pwm_chip chip;
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
index 27c107e78d59..a0b09603d13d 100644
--- a/drivers/pwm/pwm-hibvt.c
+++ b/drivers/pwm/pwm-hibvt.c
@@ -49,15 +49,30 @@ struct hibvt_pwm_chip {
struct clk *clk;
void __iomem *base;
struct reset_control *rstc;
+ const struct hibvt_pwm_soc *soc;
};
struct hibvt_pwm_soc {
u32 num_pwms;
+ bool quirk_force_enable;
};
-static const struct hibvt_pwm_soc pwm_soc[2] = {
- { .num_pwms = 4 },
- { .num_pwms = 8 },
+static const struct hibvt_pwm_soc hi3516cv300_soc_info = {
+ .num_pwms = 4,
+};
+
+static const struct hibvt_pwm_soc hi3519v100_soc_info = {
+ .num_pwms = 8,
+};
+
+static const struct hibvt_pwm_soc hi3559v100_shub_soc_info = {
+ .num_pwms = 8,
+ .quirk_force_enable = true,
+};
+
+static const struct hibvt_pwm_soc hi3559v100_soc_info = {
+ .num_pwms = 2,
+ .quirk_force_enable = true,
};
static inline struct hibvt_pwm_chip *to_hibvt_pwm_chip(struct pwm_chip *chip)
@@ -148,13 +163,23 @@ static void hibvt_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
static int hibvt_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
+ struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
+
if (state->polarity != pwm->state.polarity)
hibvt_pwm_set_polarity(chip, pwm, state->polarity);
if (state->period != pwm->state.period ||
- state->duty_cycle != pwm->state.duty_cycle)
+ state->duty_cycle != pwm->state.duty_cycle) {
hibvt_pwm_config(chip, pwm, state->duty_cycle, state->period);
+ /*
+ * Some implementations require the PWM to be enabled twice
+ * each time the duty cycle is refreshed.
+ */
+ if (hi_pwm_chip->soc->quirk_force_enable && state->enabled)
+ hibvt_pwm_enable(chip, pwm);
+ }
+
if (state->enabled != pwm->state.enabled) {
if (state->enabled)
hibvt_pwm_enable(chip, pwm);
@@ -198,6 +223,7 @@ static int hibvt_pwm_probe(struct platform_device *pdev)
pwm_chip->chip.npwm = soc->num_pwms;
pwm_chip->chip.of_xlate = of_pwm_xlate_with_flags;
pwm_chip->chip.of_pwm_n_cells = 3;
+ pwm_chip->soc = soc;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pwm_chip->base = devm_ioremap_resource(&pdev->dev, res);
@@ -250,8 +276,14 @@ static int hibvt_pwm_remove(struct platform_device *pdev)
}
static const struct of_device_id hibvt_pwm_of_match[] = {
- { .compatible = "hisilicon,hi3516cv300-pwm", .data = &pwm_soc[0] },
- { .compatible = "hisilicon,hi3519v100-pwm", .data = &pwm_soc[1] },
+ { .compatible = "hisilicon,hi3516cv300-pwm",
+ .data = &hi3516cv300_soc_info },
+ { .compatible = "hisilicon,hi3519v100-pwm",
+ .data = &hi3519v100_soc_info },
+ { .compatible = "hisilicon,hi3559v100-shub-pwm",
+ .data = &hi3559v100_shub_soc_info },
+ { .compatible = "hisilicon,hi3559v100-pwm",
+ .data = &hi3559v100_soc_info },
{ }
};
MODULE_DEVICE_TABLE(of, hibvt_pwm_of_match);
diff --git a/drivers/pwm/pwm-imx1.c b/drivers/pwm/pwm-imx1.c
new file mode 100644
index 000000000000..f8b2c2e001a7
--- /dev/null
+++ b/drivers/pwm/pwm-imx1.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * simple driver for PWM (Pulse Width Modulator) controller
+ *
+ * Derived from pxa PWM driver by eric miao <eric.miao@marvell.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+
+#define MX1_PWMC 0x00 /* PWM Control Register */
+#define MX1_PWMS 0x04 /* PWM Sample Register */
+#define MX1_PWMP 0x08 /* PWM Period Register */
+
+#define MX1_PWMC_EN BIT(4)
+
+struct pwm_imx1_chip {
+ struct clk *clk_ipg;
+ struct clk *clk_per;
+ void __iomem *mmio_base;
+ struct pwm_chip chip;
+};
+
+#define to_pwm_imx1_chip(chip) container_of(chip, struct pwm_imx1_chip, chip)
+
+static int pwm_imx1_clk_prepare_enable(struct pwm_chip *chip)
+{
+ struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip);
+ int ret;
+
+ ret = clk_prepare_enable(imx->clk_ipg);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(imx->clk_per);
+ if (ret) {
+ clk_disable_unprepare(imx->clk_ipg);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void pwm_imx1_clk_disable_unprepare(struct pwm_chip *chip)
+{
+ struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip);
+
+ clk_disable_unprepare(imx->clk_per);
+ clk_disable_unprepare(imx->clk_ipg);
+}
+
+static int pwm_imx1_config(struct pwm_chip *chip,
+ struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+ struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip);
+ u32 max, p;
+
+ /*
+ * The PWM subsystem allows for exact frequencies. However,
+ * I cannot connect a scope on my device to the PWM line and
+ * thus cannot program the PWM controller
+ * exactly. Instead, I'm relying on the fact that the
+ * Bootloader (u-boot or WinCE+haret) has programmed the PWM
+ * function group already. So I'll just modify the PWM sample
+ * register to follow the ratio of duty_ns vs. period_ns
+ * accordingly.
+ *
+ * This is good enough for programming the brightness of
+ * the LCD backlight.
+ *
+ * The real implementation would divide PERCLK[0] first by
+ * both the prescaler (/1 .. /128) and then by CLKSEL
+ * (/2 .. /16).
+ */
+ max = readl(imx->mmio_base + MX1_PWMP);
+ p = max * duty_ns / period_ns;
+
+ writel(max - p, imx->mmio_base + MX1_PWMS);
+
+ return 0;
+}
+
+static int pwm_imx1_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip);
+ u32 value;
+ int ret;
+
+ ret = pwm_imx1_clk_prepare_enable(chip);
+ if (ret < 0)
+ return ret;
+
+ value = readl(imx->mmio_base + MX1_PWMC);
+ value |= MX1_PWMC_EN;
+ writel(value, imx->mmio_base + MX1_PWMC);
+
+ return 0;
+}
+
+static void pwm_imx1_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip);
+ u32 value;
+
+ value = readl(imx->mmio_base + MX1_PWMC);
+ value &= ~MX1_PWMC_EN;
+ writel(value, imx->mmio_base + MX1_PWMC);
+
+ pwm_imx1_clk_disable_unprepare(chip);
+}
+
+static const struct pwm_ops pwm_imx1_ops = {
+ .enable = pwm_imx1_enable,
+ .disable = pwm_imx1_disable,
+ .config = pwm_imx1_config,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id pwm_imx1_dt_ids[] = {
+ { .compatible = "fsl,imx1-pwm", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, pwm_imx1_dt_ids);
+
+static int pwm_imx1_probe(struct platform_device *pdev)
+{
+ struct pwm_imx1_chip *imx;
+ struct resource *r;
+
+ imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL);
+ if (!imx)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, imx);
+
+ imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(imx->clk_ipg)) {
+ dev_err(&pdev->dev, "getting ipg clock failed with %ld\n",
+ PTR_ERR(imx->clk_ipg));
+ return PTR_ERR(imx->clk_ipg);
+ }
+
+ imx->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(imx->clk_per)) {
+ int ret = PTR_ERR(imx->clk_per);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to get peripheral clock: %d\n",
+ ret);
+
+ return ret;
+ }
+
+ imx->chip.ops = &pwm_imx1_ops;
+ imx->chip.dev = &pdev->dev;
+ imx->chip.base = -1;
+ imx->chip.npwm = 1;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ imx->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(imx->mmio_base))
+ return PTR_ERR(imx->mmio_base);
+
+ return pwmchip_add(&imx->chip);
+}
+
+static int pwm_imx1_remove(struct platform_device *pdev)
+{
+ struct pwm_imx1_chip *imx = platform_get_drvdata(pdev);
+
+ pwm_imx1_clk_disable_unprepare(&imx->chip);
+
+ return pwmchip_remove(&imx->chip);
+}
+
+static struct platform_driver pwm_imx1_driver = {
+ .driver = {
+ .name = "pwm-imx1",
+ .of_match_table = pwm_imx1_dt_ids,
+ },
+ .probe = pwm_imx1_probe,
+ .remove = pwm_imx1_remove,
+};
+module_platform_driver(pwm_imx1_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
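To make the ratio trick in pwm_imx1_config() concrete: the period register is left at whatever the bootloader programmed, and only the sample register is rescaled. A worked example with made-up numbers:

    max = 255;                                    /* value read from MX1_PWMP      */
    /* duty_ns = 250000, period_ns = 1000000 -> 25% duty requested                 */
    p = 255 * 250000 / 1000000;                   /* == 63                         */
    writel(255 - 63, imx->mmio_base + MX1_PWMS);  /* 192 written to the sample reg */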
diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx27.c
index 55a3a363d5be..806130654211 100644
--- a/drivers/pwm/pwm-imx.c
+++ b/drivers/pwm/pwm-imx27.c
@@ -19,16 +19,6 @@
#include <linux/pwm.h>
#include <linux/slab.h>
-/* i.MX1 and i.MX21 share the same PWM function block: */
-
-#define MX1_PWMC 0x00 /* PWM Control Register */
-#define MX1_PWMS 0x04 /* PWM Sample Register */
-#define MX1_PWMP 0x08 /* PWM Period Register */
-
-#define MX1_PWMC_EN BIT(4)
-
-/* i.MX27, i.MX31, i.MX35 share the same PWM function block: */
-
#define MX3_PWMCR 0x00 /* PWM Control Register */
#define MX3_PWMSR 0x04 /* PWM Status Register */
#define MX3_PWMSAR 0x0C /* PWM Sample Register */
@@ -86,21 +76,18 @@
/* PWMPR register value of 0xffff has the same effect as 0xfffe */
#define MX3_PWMPR_MAX 0xfffe
-struct imx_chip {
+struct pwm_imx27_chip {
struct clk *clk_ipg;
-
struct clk *clk_per;
-
void __iomem *mmio_base;
-
struct pwm_chip chip;
};
-#define to_imx_chip(chip) container_of(chip, struct imx_chip, chip)
+#define to_pwm_imx27_chip(chip) container_of(chip, struct pwm_imx27_chip, chip)
-static int imx_pwm_clk_prepare_enable(struct pwm_chip *chip)
+static int pwm_imx27_clk_prepare_enable(struct pwm_chip *chip)
{
- struct imx_chip *imx = to_imx_chip(chip);
+ struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
int ret;
ret = clk_prepare_enable(imx->clk_ipg);
@@ -116,35 +103,32 @@ static int imx_pwm_clk_prepare_enable(struct pwm_chip *chip)
return 0;
}
-static void imx_pwm_clk_disable_unprepare(struct pwm_chip *chip)
+static void pwm_imx27_clk_disable_unprepare(struct pwm_chip *chip)
{
- struct imx_chip *imx = to_imx_chip(chip);
+ struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
clk_disable_unprepare(imx->clk_per);
clk_disable_unprepare(imx->clk_ipg);
}
-static void imx_pwm_get_state(struct pwm_chip *chip,
- struct pwm_device *pwm, struct pwm_state *state)
+static void pwm_imx27_get_state(struct pwm_chip *chip,
+ struct pwm_device *pwm, struct pwm_state *state)
{
- struct imx_chip *imx = to_imx_chip(chip);
- u32 period, prescaler, pwm_clk, ret, val;
+ struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
+ u32 period, prescaler, pwm_clk, val;
u64 tmp;
+ int ret;
- ret = imx_pwm_clk_prepare_enable(chip);
+ ret = pwm_imx27_clk_prepare_enable(chip);
if (ret < 0)
return;
val = readl(imx->mmio_base + MX3_PWMCR);
- if (val & MX3_PWMCR_EN) {
+ if (val & MX3_PWMCR_EN)
state->enabled = true;
- ret = imx_pwm_clk_prepare_enable(chip);
- if (ret)
- return;
- } else {
+ else
state->enabled = false;
- }
switch (FIELD_GET(MX3_PWMCR_POUTC, val)) {
case MX3_PWMCR_POUTC_NORMAL:
@@ -176,70 +160,13 @@ static void imx_pwm_get_state(struct pwm_chip *chip,
state->duty_cycle = 0;
}
- imx_pwm_clk_disable_unprepare(chip);
-}
-
-static int imx_pwm_config_v1(struct pwm_chip *chip,
- struct pwm_device *pwm, int duty_ns, int period_ns)
-{
- struct imx_chip *imx = to_imx_chip(chip);
-
- /*
- * The PWM subsystem allows for exact frequencies. However,
- * I cannot connect a scope on my device to the PWM line and
- * thus cannot provide the program the PWM controller
- * exactly. Instead, I'm relying on the fact that the
- * Bootloader (u-boot or WinCE+haret) has programmed the PWM
- * function group already. So I'll just modify the PWM sample
- * register to follow the ratio of duty_ns vs. period_ns
- * accordingly.
- *
- * This is good enough for programming the brightness of
- * the LCD backlight.
- *
- * The real implementation would divide PERCLK[0] first by
- * both the prescaler (/1 .. /128) and then by CLKSEL
- * (/2 .. /16).
- */
- u32 max = readl(imx->mmio_base + MX1_PWMP);
- u32 p = max * duty_ns / period_ns;
- writel(max - p, imx->mmio_base + MX1_PWMS);
-
- return 0;
-}
-
-static int imx_pwm_enable_v1(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct imx_chip *imx = to_imx_chip(chip);
- u32 val;
- int ret;
-
- ret = imx_pwm_clk_prepare_enable(chip);
- if (ret < 0)
- return ret;
-
- val = readl(imx->mmio_base + MX1_PWMC);
- val |= MX1_PWMC_EN;
- writel(val, imx->mmio_base + MX1_PWMC);
-
- return 0;
-}
-
-static void imx_pwm_disable_v1(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct imx_chip *imx = to_imx_chip(chip);
- u32 val;
-
- val = readl(imx->mmio_base + MX1_PWMC);
- val &= ~MX1_PWMC_EN;
- writel(val, imx->mmio_base + MX1_PWMC);
-
- imx_pwm_clk_disable_unprepare(chip);
+ if (!state->enabled)
+ pwm_imx27_clk_disable_unprepare(chip);
}
-static void imx_pwm_sw_reset(struct pwm_chip *chip)
+static void pwm_imx27_sw_reset(struct pwm_chip *chip)
{
- struct imx_chip *imx = to_imx_chip(chip);
+ struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
struct device *dev = chip->dev;
int wait_count = 0;
u32 cr;
@@ -255,10 +182,10 @@ static void imx_pwm_sw_reset(struct pwm_chip *chip)
dev_warn(dev, "software reset timeout\n");
}
-static void imx_pwm_wait_fifo_slot(struct pwm_chip *chip,
- struct pwm_device *pwm)
+static void pwm_imx27_wait_fifo_slot(struct pwm_chip *chip,
+ struct pwm_device *pwm)
{
- struct imx_chip *imx = to_imx_chip(chip);
+ struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
struct device *dev = chip->dev;
unsigned int period_ms;
int fifoav;
@@ -277,11 +204,11 @@ static void imx_pwm_wait_fifo_slot(struct pwm_chip *chip,
}
}
-static int imx_pwm_apply_v2(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state)
+static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
{
unsigned long period_cycles, duty_cycles, prescale;
- struct imx_chip *imx = to_imx_chip(chip);
+ struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
struct pwm_state cstate;
unsigned long long c;
int ret;
@@ -318,13 +245,13 @@ static int imx_pwm_apply_v2(struct pwm_chip *chip, struct pwm_device *pwm,
* enabled.
*/
if (cstate.enabled) {
- imx_pwm_wait_fifo_slot(chip, pwm);
+ pwm_imx27_wait_fifo_slot(chip, pwm);
} else {
- ret = imx_pwm_clk_prepare_enable(chip);
+ ret = pwm_imx27_clk_prepare_enable(chip);
if (ret)
return ret;
- imx_pwm_sw_reset(chip);
+ pwm_imx27_sw_reset(chip);
}
writel(duty_cycles, imx->mmio_base + MX3_PWMSAR);
@@ -343,64 +270,35 @@ static int imx_pwm_apply_v2(struct pwm_chip *chip, struct pwm_device *pwm,
} else if (cstate.enabled) {
writel(0, imx->mmio_base + MX3_PWMCR);
- imx_pwm_clk_disable_unprepare(chip);
+ pwm_imx27_clk_disable_unprepare(chip);
}
return 0;
}
-static const struct pwm_ops imx_pwm_ops_v1 = {
- .enable = imx_pwm_enable_v1,
- .disable = imx_pwm_disable_v1,
- .config = imx_pwm_config_v1,
+static const struct pwm_ops pwm_imx27_ops = {
+ .apply = pwm_imx27_apply,
+ .get_state = pwm_imx27_get_state,
.owner = THIS_MODULE,
};
-static const struct pwm_ops imx_pwm_ops_v2 = {
- .apply = imx_pwm_apply_v2,
- .get_state = imx_pwm_get_state,
- .owner = THIS_MODULE,
-};
-
-struct imx_pwm_data {
- bool polarity_supported;
- const struct pwm_ops *ops;
-};
-
-static struct imx_pwm_data imx_pwm_data_v1 = {
- .ops = &imx_pwm_ops_v1,
-};
-
-static struct imx_pwm_data imx_pwm_data_v2 = {
- .polarity_supported = true,
- .ops = &imx_pwm_ops_v2,
-};
-
-static const struct of_device_id imx_pwm_dt_ids[] = {
- { .compatible = "fsl,imx1-pwm", .data = &imx_pwm_data_v1, },
- { .compatible = "fsl,imx27-pwm", .data = &imx_pwm_data_v2, },
+static const struct of_device_id pwm_imx27_dt_ids[] = {
+ { .compatible = "fsl,imx27-pwm", },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, imx_pwm_dt_ids);
+MODULE_DEVICE_TABLE(of, pwm_imx27_dt_ids);
-static int imx_pwm_probe(struct platform_device *pdev)
+static int pwm_imx27_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id =
- of_match_device(imx_pwm_dt_ids, &pdev->dev);
- const struct imx_pwm_data *data;
- struct imx_chip *imx;
+ struct pwm_imx27_chip *imx;
struct resource *r;
- int ret = 0;
-
- if (!of_id)
- return -ENODEV;
-
- data = of_id->data;
imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL);
if (imx == NULL)
return -ENOMEM;
+ platform_set_drvdata(pdev, imx);
+
imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(imx->clk_ipg)) {
dev_err(&pdev->dev, "getting ipg clock failed with %ld\n",
@@ -410,57 +308,51 @@ static int imx_pwm_probe(struct platform_device *pdev)
imx->clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(imx->clk_per)) {
- dev_err(&pdev->dev, "getting per clock failed with %ld\n",
- PTR_ERR(imx->clk_per));
- return PTR_ERR(imx->clk_per);
+ int ret = PTR_ERR(imx->clk_per);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to get peripheral clock: %d\n",
+ ret);
+
+ return ret;
}
- imx->chip.ops = data->ops;
+ imx->chip.ops = &pwm_imx27_ops;
imx->chip.dev = &pdev->dev;
imx->chip.base = -1;
imx->chip.npwm = 1;
- if (data->polarity_supported) {
- dev_dbg(&pdev->dev, "PWM supports output inversion\n");
- imx->chip.of_xlate = of_pwm_xlate_with_flags;
- imx->chip.of_pwm_n_cells = 3;
- }
+ imx->chip.of_xlate = of_pwm_xlate_with_flags;
+ imx->chip.of_pwm_n_cells = 3;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
imx->mmio_base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(imx->mmio_base))
return PTR_ERR(imx->mmio_base);
- ret = pwmchip_add(&imx->chip);
- if (ret < 0)
- return ret;
-
- platform_set_drvdata(pdev, imx);
- return 0;
+ return pwmchip_add(&imx->chip);
}
-static int imx_pwm_remove(struct platform_device *pdev)
+static int pwm_imx27_remove(struct platform_device *pdev)
{
- struct imx_chip *imx;
+ struct pwm_imx27_chip *imx;
imx = platform_get_drvdata(pdev);
- if (imx == NULL)
- return -ENODEV;
- imx_pwm_clk_disable_unprepare(&imx->chip);
+ pwm_imx27_clk_disable_unprepare(&imx->chip);
return pwmchip_remove(&imx->chip);
}
static struct platform_driver imx_pwm_driver = {
- .driver = {
- .name = "imx-pwm",
- .of_match_table = imx_pwm_dt_ids,
+ .driver = {
+ .name = "pwm-imx27",
+ .of_match_table = pwm_imx27_dt_ids,
},
- .probe = imx_pwm_probe,
- .remove = imx_pwm_remove,
+ .probe = pwm_imx27_probe,
+ .remove = pwm_imx27_remove,
};
-
module_platform_driver(imx_pwm_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
index 893940d45f0d..15803c71fe80 100644
--- a/drivers/pwm/pwm-mtk-disp.c
+++ b/drivers/pwm/pwm-mtk-disp.c
@@ -277,10 +277,21 @@ static const struct mtk_pwm_data mt8173_pwm_data = {
.commit_mask = 0x1,
};
+static const struct mtk_pwm_data mt8183_pwm_data = {
+ .enable_mask = BIT(0),
+ .con0 = 0x18,
+ .con0_sel = 0x0,
+ .con1 = 0x1c,
+ .has_commit = false,
+ .bls_debug = 0x80,
+ .bls_debug_mask = 0x3,
+};
+
static const struct of_device_id mtk_disp_pwm_of_match[] = {
{ .compatible = "mediatek,mt2701-disp-pwm", .data = &mt2701_pwm_data},
{ .compatible = "mediatek,mt6595-disp-pwm", .data = &mt8173_pwm_data},
{ .compatible = "mediatek,mt8173-disp-pwm", .data = &mt8173_pwm_data},
+ { .compatible = "mediatek,mt8183-disp-pwm", .data = &mt8183_pwm_data},
{ }
};
MODULE_DEVICE_TABLE(of, mtk_disp_pwm_of_match);
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index a41812fc6f95..cfe7dd1b448e 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -8,6 +8,8 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/log2.h>
+#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -68,19 +70,15 @@ static void rcar_pwm_update(struct rcar_pwm_chip *rp, u32 mask, u32 data,
static int rcar_pwm_get_clock_division(struct rcar_pwm_chip *rp, int period_ns)
{
unsigned long clk_rate = clk_get_rate(rp->clk);
- unsigned long long max; /* max cycle / nanoseconds */
- unsigned int div;
+ u64 div, tmp;
if (clk_rate == 0)
return -EINVAL;
- for (div = 0; div <= RCAR_PWM_MAX_DIVISION; div++) {
- max = (unsigned long long)NSEC_PER_SEC * RCAR_PWM_MAX_CYCLE *
- (1 << div);
- do_div(max, clk_rate);
- if (period_ns <= max)
- break;
- }
+ div = (u64)NSEC_PER_SEC * RCAR_PWM_MAX_CYCLE;
+ tmp = (u64)period_ns * clk_rate + div - 1;
+ tmp = div64_u64(tmp, div);
+ div = ilog2(tmp - 1) + 1;
return (div <= RCAR_PWM_MAX_DIVISION) ? div : -ERANGE;
}
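The loop is replaced by a closed-form computation of the smallest divider for which period_ns still fits within one full PWM cycle. A worked example with round numbers, assuming RCAR_PWM_MAX_CYCLE is 1023 (the driver's existing limit):

    clk_rate  = 100000000;                  /* 100 MHz            */
    period_ns = 1000000;                    /* 1 ms               */
    div = (u64)NSEC_PER_SEC * 1023;         /* 1.023e12           */
    tmp = (u64)period_ns * clk_rate + div - 1;
    tmp = div64_u64(tmp, div);              /* == 98              */
    div = ilog2(tmp - 1) + 1;               /* ilog2(97) + 1 == 7 */
    /* the removed loop would also have settled on div == 7 */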
@@ -139,39 +137,8 @@ static void rcar_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
pm_runtime_put(chip->dev);
}
-static int rcar_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+static int rcar_pwm_enable(struct rcar_pwm_chip *rp)
{
- struct rcar_pwm_chip *rp = to_rcar_pwm_chip(chip);
- int div, ret;
-
- div = rcar_pwm_get_clock_division(rp, period_ns);
- if (div < 0)
- return div;
-
- /*
- * Let the core driver set pwm->period if disabled and duty_ns == 0.
- * But, this driver should prevent to set the new duty_ns if current
- * duty_cycle is not set
- */
- if (!pwm_is_enabled(pwm) && !duty_ns && !pwm->state.duty_cycle)
- return 0;
-
- rcar_pwm_update(rp, RCAR_PWMCR_SYNC, RCAR_PWMCR_SYNC, RCAR_PWMCR);
-
- ret = rcar_pwm_set_counter(rp, div, duty_ns, period_ns);
- if (!ret)
- rcar_pwm_set_clock_control(rp, div);
-
- /* The SYNC should be set to 0 even if rcar_pwm_set_counter failed */
- rcar_pwm_update(rp, RCAR_PWMCR_SYNC, 0, RCAR_PWMCR);
-
- return ret;
-}
-
-static int rcar_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct rcar_pwm_chip *rp = to_rcar_pwm_chip(chip);
u32 value;
/* Don't enable the PWM device if CYC0 or PH0 is 0 */
@@ -185,19 +152,51 @@ static int rcar_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
return 0;
}
-static void rcar_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+static void rcar_pwm_disable(struct rcar_pwm_chip *rp)
+{
+ rcar_pwm_update(rp, RCAR_PWMCR_EN0, 0, RCAR_PWMCR);
+}
+
+static int rcar_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
{
struct rcar_pwm_chip *rp = to_rcar_pwm_chip(chip);
+ struct pwm_state cur_state;
+ int div, ret;
- rcar_pwm_update(rp, RCAR_PWMCR_EN0, 0, RCAR_PWMCR);
+ /* This HW/driver only supports normal polarity */
+ pwm_get_state(pwm, &cur_state);
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -ENOTSUPP;
+
+ if (!state->enabled) {
+ rcar_pwm_disable(rp);
+ return 0;
+ }
+
+ div = rcar_pwm_get_clock_division(rp, state->period);
+ if (div < 0)
+ return div;
+
+ rcar_pwm_update(rp, RCAR_PWMCR_SYNC, RCAR_PWMCR_SYNC, RCAR_PWMCR);
+
+ ret = rcar_pwm_set_counter(rp, div, state->duty_cycle, state->period);
+ if (!ret)
+ rcar_pwm_set_clock_control(rp, div);
+
+ /* The SYNC should be set to 0 even if rcar_pwm_set_counter failed */
+ rcar_pwm_update(rp, RCAR_PWMCR_SYNC, 0, RCAR_PWMCR);
+
+ if (!ret && state->enabled)
+ ret = rcar_pwm_enable(rp);
+
+ return ret;
}
static const struct pwm_ops rcar_pwm_ops = {
.request = rcar_pwm_request,
.free = rcar_pwm_free,
- .config = rcar_pwm_config,
- .enable = rcar_pwm_enable,
- .disable = rcar_pwm_disable,
+ .apply = rcar_pwm_apply,
.owner = THIS_MODULE,
};
@@ -279,18 +278,16 @@ static int rcar_pwm_suspend(struct device *dev)
static int rcar_pwm_resume(struct device *dev)
{
struct pwm_device *pwm = rcar_pwm_dev_to_pwm_dev(dev);
+ struct pwm_state state;
if (!test_bit(PWMF_REQUESTED, &pwm->flags))
return 0;
pm_runtime_get_sync(dev);
- rcar_pwm_config(pwm->chip, pwm, pwm->state.duty_cycle,
- pwm->state.period);
- if (pwm_is_enabled(pwm))
- rcar_pwm_enable(pwm->chip, pwm);
+ pwm_get_state(pwm, &state);
- return 0;
+ return rcar_pwm_apply(pwm->chip, pwm, &state);
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(rcar_pwm_pm_ops, rcar_pwm_suspend, rcar_pwm_resume);
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index 79374d1de311..1f3ef9ee493c 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -48,7 +48,7 @@
/* list of clocks required by ADSP PIL */
static const char * const adsp_clk_id[] = {
- "sway_cbcr", "lpass_aon", "lpass_ahbs_aon_cbcr", "lpass_ahbm_aon_cbcr",
+ "sway_cbcr", "lpass_ahbs_aon_cbcr", "lpass_ahbm_aon_cbcr",
"qdsp6ss_xo", "qdsp6ss_sleep", "qdsp6ss_core",
};
@@ -439,6 +439,10 @@ static int adsp_probe(struct platform_device *pdev)
adsp->sysmon = qcom_add_sysmon_subdev(rproc,
desc->sysmon_name,
desc->ssctl_id);
+ if (IS_ERR(adsp->sysmon)) {
+ ret = PTR_ERR(adsp->sysmon);
+ goto disable_pm;
+ }
ret = rproc_add(rproc);
if (ret)
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 01be7314e176..eacdf10fcfaf 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -25,6 +25,8 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
@@ -131,6 +133,8 @@ struct rproc_hexagon_res {
char **proxy_clk_names;
char **reset_clk_names;
char **active_clk_names;
+ char **active_pd_names;
+ char **proxy_pd_names;
int version;
bool need_mem_protection;
bool has_alt_reset;
@@ -156,9 +160,13 @@ struct q6v5 {
struct clk *active_clks[8];
struct clk *reset_clks[4];
struct clk *proxy_clks[4];
+ struct device *active_pds[1];
+ struct device *proxy_pds[3];
int active_clk_count;
int reset_clk_count;
int proxy_clk_count;
+ int active_pd_count;
+ int proxy_pd_count;
struct reg_info active_regs[1];
struct reg_info proxy_regs[3];
@@ -188,6 +196,7 @@ struct q6v5 {
bool has_alt_reset;
int mpss_perm;
int mba_perm;
+ const char *hexagon_mdt_image;
int version;
};
@@ -321,6 +330,41 @@ static void q6v5_clk_disable(struct device *dev,
clk_disable_unprepare(clks[i]);
}
+static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
+ size_t pd_count)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < pd_count; i++) {
+ dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
+ ret = pm_runtime_get_sync(pds[i]);
+ if (ret < 0)
+ goto unroll_pd_votes;
+ }
+
+ return 0;
+
+unroll_pd_votes:
+ for (i--; i >= 0; i--) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ pm_runtime_put(pds[i]);
+ }
+
+ return ret;
+};
+
+static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
+ size_t pd_count)
+{
+ int i;
+
+ for (i = 0; i < pd_count; i++) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ pm_runtime_put(pds[i]);
+ }
+}
+
static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
bool remote_owner, phys_addr_t addr,
size_t size)
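The helpers above follow the standard genpd pattern: each named domain is attached as a virtual device via dev_pm_domain_attach_by_name(), and a vote consists of raising the performance state and taking a runtime-PM reference; dropping the vote reverses both. A condensed sketch for one attached domain device (pd_dev is a placeholder name):

    dev_pm_genpd_set_performance_state(pd_dev, INT_MAX);  /* highest corner */
    pm_runtime_get_sync(pd_dev);                           /* keep powered   */
    /* ... remote processor running ... */
    dev_pm_genpd_set_performance_state(pd_dev, 0);
    pm_runtime_put(pd_dev);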
@@ -690,11 +734,23 @@ static int q6v5_mba_load(struct q6v5 *qproc)
qcom_q6v5_prepare(&qproc->q6v5);
+ ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count);
+ if (ret < 0) {
+ dev_err(qproc->dev, "failed to enable active power domains\n");
+ goto disable_irqs;
+ }
+
+ ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
+ if (ret < 0) {
+ dev_err(qproc->dev, "failed to enable proxy power domains\n");
+ goto disable_active_pds;
+ }
+
ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
qproc->proxy_reg_count);
if (ret) {
dev_err(qproc->dev, "failed to enable proxy supplies\n");
- goto disable_irqs;
+ goto disable_proxy_pds;
}
ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
@@ -791,6 +847,10 @@ disable_proxy_clk:
disable_proxy_reg:
q6v5_regulator_disable(qproc, qproc->proxy_regs,
qproc->proxy_reg_count);
+disable_proxy_pds:
+ q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
+disable_active_pds:
+ q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
disable_irqs:
qcom_q6v5_unprepare(&qproc->q6v5);
@@ -830,6 +890,7 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
qproc->active_clk_count);
q6v5_regulator_disable(qproc, qproc->active_regs,
qproc->active_reg_count);
+ q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
/* In case of failure or coredump scenario where reclaiming MBA memory
* could not happen reclaim it here.
@@ -841,6 +902,8 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
ret = qcom_q6v5_unprepare(&qproc->q6v5);
if (ret) {
+ q6v5_pds_disable(qproc, qproc->proxy_pds,
+ qproc->proxy_pd_count);
q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
qproc->proxy_clk_count);
q6v5_regulator_disable(qproc, qproc->proxy_regs,
@@ -860,17 +923,26 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
phys_addr_t min_addr = PHYS_ADDR_MAX;
phys_addr_t max_addr = 0;
bool relocate = false;
- char seg_name[10];
+ char *fw_name;
+ size_t fw_name_len;
ssize_t offset;
size_t size = 0;
void *ptr;
int ret;
int i;
- ret = request_firmware(&fw, "modem.mdt", qproc->dev);
+ fw_name_len = strlen(qproc->hexagon_mdt_image);
+ if (fw_name_len <= 4)
+ return -EINVAL;
+
+ fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
+ if (!fw_name)
+ return -ENOMEM;
+
+ ret = request_firmware(&fw, fw_name, qproc->dev);
if (ret < 0) {
- dev_err(qproc->dev, "unable to load modem.mdt\n");
- return ret;
+ dev_err(qproc->dev, "unable to load %s\n", fw_name);
+ goto out;
}
/* Initialize the RMB validator */
@@ -918,10 +990,11 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
ptr = qproc->mpss_region + offset;
if (phdr->p_filesz) {
- snprintf(seg_name, sizeof(seg_name), "modem.b%02d", i);
- ret = request_firmware(&seg_fw, seg_name, qproc->dev);
+ /* Replace "xxx.xxx" with "xxx.bxx" */
+ sprintf(fw_name + fw_name_len - 3, "b%02d", i);
+ ret = request_firmware(&seg_fw, fw_name, qproc->dev);
if (ret) {
- dev_err(qproc->dev, "failed to load %s\n", seg_name);
+ dev_err(qproc->dev, "failed to load %s\n", fw_name);
goto release_firmware;
}
@@ -960,6 +1033,8 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
release_firmware:
release_firmware(fw);
+out:
+ kfree(fw_name);
return ret < 0 ? ret : 0;
}
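The sprintf() above rewrites the three-character extension in place, which is also why names of four characters or fewer are rejected early by the fw_name_len check. With the default image name:

    /* "modem.mdt" (length 9) */
    sprintf(fw_name + fw_name_len - 3, "b%02d", i);  /* -> "modem.b00", "modem.b01", ... */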
@@ -1075,9 +1150,10 @@ static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
unsigned long i;
int ret;
- ret = request_firmware(&fw, "modem.mdt", qproc->dev);
+ ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
if (ret < 0) {
- dev_err(qproc->dev, "unable to load modem.mdt\n");
+ dev_err(qproc->dev, "unable to load %s\n",
+ qproc->hexagon_mdt_image);
return ret;
}
@@ -1121,6 +1197,7 @@ static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
qproc->proxy_clk_count);
q6v5_regulator_disable(qproc, qproc->proxy_regs,
qproc->proxy_reg_count);
+ q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
}
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
@@ -1181,6 +1258,45 @@ static int q6v5_init_clocks(struct device *dev, struct clk **clks,
return i;
}
+static int q6v5_pds_attach(struct device *dev, struct device **devs,
+ char **pd_names)
+{
+ size_t num_pds = 0;
+ int ret;
+ int i;
+
+ if (!pd_names)
+ return 0;
+
+ while (pd_names[num_pds])
+ num_pds++;
+
+ for (i = 0; i < num_pds; i++) {
+ devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
+ if (IS_ERR(devs[i])) {
+ ret = PTR_ERR(devs[i]);
+ goto unroll_attach;
+ }
+ }
+
+ return num_pds;
+
+unroll_attach:
+ for (i--; i >= 0; i--)
+ dev_pm_domain_detach(devs[i], false);
+
+ return ret;
+};
+
+static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
+ size_t pd_count)
+{
+ int i;
+
+ for (i = 0; i < pd_count; i++)
+ dev_pm_domain_detach(pds[i], false);
+}
+
static int q6v5_init_reset(struct q6v5 *qproc)
{
qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
@@ -1253,6 +1369,7 @@ static int q6v5_probe(struct platform_device *pdev)
const struct rproc_hexagon_res *desc;
struct q6v5 *qproc;
struct rproc *rproc;
+ const char *mba_image;
int ret;
desc = of_device_get_match_data(&pdev->dev);
@@ -1262,16 +1379,30 @@ static int q6v5_probe(struct platform_device *pdev)
if (desc->need_mem_protection && !qcom_scm_is_available())
return -EPROBE_DEFER;
+ mba_image = desc->hexagon_mba_image;
+ ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
+ 0, &mba_image);
+ if (ret < 0 && ret != -EINVAL)
+ return ret;
+
rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
- desc->hexagon_mba_image, sizeof(*qproc));
+ mba_image, sizeof(*qproc));
if (!rproc) {
dev_err(&pdev->dev, "failed to allocate rproc\n");
return -ENOMEM;
}
+ rproc->auto_boot = false;
+
qproc = (struct q6v5 *)rproc->priv;
qproc->dev = &pdev->dev;
qproc->rproc = rproc;
+ qproc->hexagon_mdt_image = "modem.mdt";
+ ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
+ 1, &qproc->hexagon_mdt_image);
+ if (ret < 0 && ret != -EINVAL)
+ return ret;
+
platform_set_drvdata(pdev, qproc);
ret = q6v5_init_mem(qproc, pdev);
@@ -1322,10 +1453,26 @@ static int q6v5_probe(struct platform_device *pdev)
}
qproc->active_reg_count = ret;
+ ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds,
+ desc->active_pd_names);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to attach active power domains\n");
+ goto free_rproc;
+ }
+ qproc->active_pd_count = ret;
+
+ ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
+ desc->proxy_pd_names);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to init power domains\n");
+ goto detach_active_pds;
+ }
+ qproc->proxy_pd_count = ret;
+
qproc->has_alt_reset = desc->has_alt_reset;
ret = q6v5_init_reset(qproc);
if (ret)
- goto free_rproc;
+ goto detach_proxy_pds;
qproc->version = desc->version;
qproc->need_mem_protection = desc->need_mem_protection;
@@ -1333,7 +1480,7 @@ static int q6v5_probe(struct platform_device *pdev)
ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
qcom_msa_handover);
if (ret)
- goto free_rproc;
+ goto detach_proxy_pds;
qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
@@ -1341,13 +1488,21 @@ static int q6v5_probe(struct platform_device *pdev)
qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
+ if (IS_ERR(qproc->sysmon)) {
+ ret = PTR_ERR(qproc->sysmon);
+ goto detach_proxy_pds;
+ }
ret = rproc_add(rproc);
if (ret)
- goto free_rproc;
+ goto detach_proxy_pds;
return 0;
+detach_proxy_pds:
+ q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
+detach_active_pds:
+ q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
free_rproc:
rproc_free(rproc);
@@ -1364,6 +1519,10 @@ static int q6v5_remove(struct platform_device *pdev)
qcom_remove_glink_subdev(qproc->rproc, &qproc->glink_subdev);
qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev);
qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev);
+
+ q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
+ q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
+
rproc_free(qproc->rproc);
return 0;
@@ -1388,6 +1547,16 @@ static const struct rproc_hexagon_res sdm845_mss = {
"mnoc_axi",
NULL
},
+ .active_pd_names = (char*[]){
+ "load_state",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mx",
+ "mss",
+ NULL
+ },
.need_mem_protection = true,
.has_alt_reset = true,
.version = MSS_SDM845,
@@ -1395,16 +1564,26 @@ static const struct rproc_hexagon_res sdm845_mss = {
static const struct rproc_hexagon_res msm8996_mss = {
.hexagon_mba_image = "mba.mbn",
+ .proxy_supply = (struct qcom_mss_reg_res[]) {
+ {
+ .supply = "pll",
+ .uA = 100000,
+ },
+ {}
+ },
.proxy_clk_names = (char*[]){
"xo",
"pnoc",
+ "qdss",
NULL
},
.active_clk_names = (char*[]){
"iface",
"bus",
"mem",
- "gpll0_mss_clk",
+ "gpll0_mss",
+ "snoc_axi",
+ "mnoc_axi",
NULL
},
.need_mem_protection = true,
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index b1e63fcd5fdf..f280f196d007 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -258,6 +258,7 @@ static int adsp_probe(struct platform_device *pdev)
const struct adsp_data *desc;
struct qcom_adsp *adsp;
struct rproc *rproc;
+ const char *fw_name;
int ret;
desc = of_device_get_match_data(&pdev->dev);
@@ -267,8 +268,14 @@ static int adsp_probe(struct platform_device *pdev)
if (!qcom_scm_is_available())
return -EPROBE_DEFER;
+ fw_name = desc->firmware_name;
+ ret = of_property_read_string(pdev->dev.of_node, "firmware-name",
+ &fw_name);
+ if (ret < 0 && ret != -EINVAL)
+ return ret;
+
rproc = rproc_alloc(&pdev->dev, pdev->name, &adsp_ops,
- desc->firmware_name, sizeof(*adsp));
+ fw_name, sizeof(*adsp));
if (!rproc) {
dev_err(&pdev->dev, "unable to allocate remoteproc\n");
return -ENOMEM;
@@ -304,6 +311,10 @@ static int adsp_probe(struct platform_device *pdev)
adsp->sysmon = qcom_add_sysmon_subdev(rproc,
desc->sysmon_name,
desc->ssctl_id);
+ if (IS_ERR(adsp->sysmon)) {
+ ret = PTR_ERR(adsp->sysmon);
+ goto free_rproc;
+ }
ret = rproc_add(rproc);
if (ret)
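
The only error tolerated from of_property_read_string() above is -EINVAL, which it returns when the property is absent, so a missing optional "firmware-name" property silently keeps the compiled-in default. A minimal, hypothetical helper capturing the same pattern (names are made up for illustration):

#include <linux/of.h>

static int example_optional_fw_name(struct device *dev, const char *fallback,
				    const char **fw_name)
{
	int ret;

	*fw_name = fallback;

	/* -EINVAL means "property not present": keep the fallback. */
	ret = of_property_read_string(dev->of_node, "firmware-name", fw_name);
	if (ret < 0 && ret != -EINVAL)
		return ret;

	return 0;
}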
diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
index e976a602b015..c231314eab66 100644
--- a/drivers/remoteproc/qcom_sysmon.c
+++ b/drivers/remoteproc/qcom_sysmon.c
@@ -6,8 +6,9 @@
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/slab.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/notifier.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/remoteproc/qcom_rproc.h>
@@ -25,6 +26,7 @@ struct qcom_sysmon {
const char *name;
+ int shutdown_irq;
int ssctl_version;
int ssctl_instance;
@@ -34,6 +36,8 @@ struct qcom_sysmon {
struct rpmsg_endpoint *ept;
struct completion comp;
+ struct completion ind_comp;
+ struct completion shutdown_comp;
struct mutex lock;
bool ssr_ack;
@@ -137,6 +141,7 @@ static int sysmon_callback(struct rpmsg_device *rpdev, void *data, int count,
}
#define SSCTL_SHUTDOWN_REQ 0x21
+#define SSCTL_SHUTDOWN_READY_IND 0x21
#define SSCTL_SUBSYS_EVENT_REQ 0x23
#define SSCTL_MAX_MSG_LEN 7
@@ -252,6 +257,29 @@ static struct qmi_elem_info ssctl_subsys_event_resp_ei[] = {
{}
};
+static struct qmi_elem_info ssctl_shutdown_ind_ei[] = {
+ {}
+};
+
+static void sysmon_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *data)
+{
+ struct qcom_sysmon *sysmon = container_of(qmi, struct qcom_sysmon, qmi);
+
+ complete(&sysmon->ind_comp);
+}
+
+static struct qmi_msg_handler qmi_indication_handler[] = {
+ {
+ .type = QMI_INDICATION,
+ .msg_id = SSCTL_SHUTDOWN_READY_IND,
+ .ei = ssctl_shutdown_ind_ei,
+ .decoded_size = 0,
+ .fn = sysmon_ind_cb
+ },
+ {}
+};
+
/**
* ssctl_request_shutdown() - request shutdown via SSCTL QMI service
* @sysmon: sysmon context
@@ -262,6 +290,8 @@ static void ssctl_request_shutdown(struct qcom_sysmon *sysmon)
struct qmi_txn txn;
int ret;
+ reinit_completion(&sysmon->ind_comp);
+ reinit_completion(&sysmon->shutdown_comp);
ret = qmi_txn_init(&sysmon->qmi, &txn, ssctl_shutdown_resp_ei, &resp);
if (ret < 0) {
dev_err(sysmon->dev, "failed to allocate QMI txn\n");
@@ -283,6 +313,17 @@ static void ssctl_request_shutdown(struct qcom_sysmon *sysmon)
dev_err(sysmon->dev, "shutdown request failed\n");
else
dev_dbg(sysmon->dev, "shutdown request completed\n");
+
+ if (sysmon->shutdown_irq > 0) {
+ ret = wait_for_completion_timeout(&sysmon->shutdown_comp,
+ 10 * HZ);
+ if (!ret) {
+ ret = try_wait_for_completion(&sysmon->ind_comp);
+ if (!ret)
+ dev_err(sysmon->dev,
+ "timeout waiting for shutdown ack\n");
+ }
+ }
}
/**
@@ -432,6 +473,15 @@ static int sysmon_notify(struct notifier_block *nb, unsigned long event,
return NOTIFY_DONE;
}
+static irqreturn_t sysmon_shutdown_interrupt(int irq, void *data)
+{
+ struct qcom_sysmon *sysmon = data;
+
+ complete(&sysmon->shutdown_comp);
+
+ return IRQ_HANDLED;
+}
+
/**
* qcom_add_sysmon_subdev() - create a sysmon subdev for the given remoteproc
* @rproc: rproc context to associate the subdev with
@@ -449,7 +499,7 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
sysmon = kzalloc(sizeof(*sysmon), GFP_KERNEL);
if (!sysmon)
- return NULL;
+ return ERR_PTR(-ENOMEM);
sysmon->dev = rproc->dev.parent;
sysmon->rproc = rproc;
@@ -458,13 +508,37 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
sysmon->ssctl_instance = ssctl_instance;
init_completion(&sysmon->comp);
+ init_completion(&sysmon->ind_comp);
+ init_completion(&sysmon->shutdown_comp);
mutex_init(&sysmon->lock);
- ret = qmi_handle_init(&sysmon->qmi, SSCTL_MAX_MSG_LEN, &ssctl_ops, NULL);
+ sysmon->shutdown_irq = of_irq_get_byname(sysmon->dev->of_node,
+ "shutdown-ack");
+ if (sysmon->shutdown_irq < 0) {
+ if (sysmon->shutdown_irq != -ENODATA) {
+ dev_err(sysmon->dev,
+ "failed to retrieve shutdown-ack IRQ\n");
+ ret = sysmon->shutdown_irq;
+ kfree(sysmon);
+ return ERR_PTR(ret);
+ }
+ } else {
+ ret = devm_request_threaded_irq(sysmon->dev,
+ sysmon->shutdown_irq,
+ NULL, sysmon_shutdown_interrupt,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "q6v5 shutdown-ack", sysmon);
+ if (ret) {
+ dev_err(sysmon->dev,
+ "failed to acquire shutdown-ack IRQ\n");
+ kfree(sysmon);
+ return ERR_PTR(ret);
+ }
+ }
+
+ ret = qmi_handle_init(&sysmon->qmi, SSCTL_MAX_MSG_LEN, &ssctl_ops,
+ qmi_indication_handler);
if (ret < 0) {
dev_err(sysmon->dev, "failed to initialize qmi handle\n");
kfree(sysmon);
- return NULL;
+ return ERR_PTR(ret);
}
qmi_add_lookup(&sysmon->qmi, 43, 0, 0);
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index b0e07e9f42d5..adcce523971e 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -553,6 +553,10 @@ static int wcnss_probe(struct platform_device *pdev)
qcom_add_smd_subdev(rproc, &wcnss->smd_subdev);
wcnss->sysmon = qcom_add_sysmon_subdev(rproc, "wcnss", WCNSS_SSCTL_ID);
+ if (IS_ERR(wcnss->sysmon)) {
+ ret = PTR_ERR(wcnss->sysmon);
+ goto free_rproc;
+ }
ret = rproc_add(rproc);
if (ret)
@@ -622,5 +626,5 @@ static void __exit wcnss_exit(void)
}
module_exit(wcnss_exit);
-MODULE_DESCRIPTION("Qualcomm Peripherial Image Loader for Wireless Subsystem");
+MODULE_DESCRIPTION("Qualcomm Peripheral Image Loader for Wireless Subsystem");
MODULE_LICENSE("GPL v2");
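
With qcom_add_sysmon_subdev() now returning ERR_PTR() instead of NULL on failure, every caller in this series (qcom_q6v5_mss.c, qcom_q6v5_pas.c and qcom_wcnss.c above) switches from a NULL check to the IS_ERR()/PTR_ERR() idiom, so QMI or IRQ setup failures finally propagate out of probe. The caller-side shape, already visible in the hunks above:

	adsp->sysmon = qcom_add_sysmon_subdev(rproc, desc->sysmon_name,
					      desc->ssctl_id);
	if (IS_ERR(adsp->sysmon)) {
		ret = PTR_ERR(adsp->sysmon);
		goto free_rproc;
	}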
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 54ec38fc5dca..48feebd6d0a2 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -39,12 +39,16 @@
#include <linux/idr.h>
#include <linux/elf.h>
#include <linux/crc32.h>
+#include <linux/of_reserved_mem.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
#include <asm/byteorder.h>
+#include <linux/platform_device.h>
#include "remoteproc_internal.h"
+#define HIGH_BITS_MASK 0xFFFFFFFF00000000ULL
+
static DEFINE_MUTEX(rproc_list_mutex);
static LIST_HEAD(rproc_list);
@@ -145,7 +149,7 @@ static void rproc_disable_iommu(struct rproc *rproc)
iommu_domain_free(domain);
}
-static phys_addr_t rproc_va_to_pa(void *cpu_addr)
+phys_addr_t rproc_va_to_pa(void *cpu_addr)
{
/*
* Return physical address according to virtual address location
@@ -160,6 +164,7 @@ static phys_addr_t rproc_va_to_pa(void *cpu_addr)
WARN_ON(!virt_addr_valid(cpu_addr));
return virt_to_phys(cpu_addr);
}
+EXPORT_SYMBOL(rproc_va_to_pa);
/**
* rproc_da_to_va() - lookup the kernel virtual address for a remoteproc address
@@ -204,6 +209,10 @@ void *rproc_da_to_va(struct rproc *rproc, u64 da, int len)
list_for_each_entry(carveout, &rproc->carveouts, node) {
int offset = da - carveout->da;
+ /* Verify that carveout is allocated */
+ if (!carveout->va)
+ continue;
+
/* try next carveout if da is too small */
if (offset < 0)
continue;
@@ -272,25 +281,27 @@ rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...)
* @len: associated area size
*
* This function is a helper function to verify requested device area (couple
- * da, len) is part of specified carevout.
+ * da, len) is part of specified carveout.
+ * If da is not set (defined as FW_RSC_ADDR_ANY), only the requested length
+ * is checked.
*
- * Return: 0 if carveout match request else -ENOMEM
+ * Return: 0 if carveout matches request else error
*/
-int rproc_check_carveout_da(struct rproc *rproc, struct rproc_mem_entry *mem,
- u32 da, u32 len)
+static int rproc_check_carveout_da(struct rproc *rproc,
+ struct rproc_mem_entry *mem, u32 da, u32 len)
{
struct device *dev = &rproc->dev;
- int delta = 0;
+ int delta;
/* Check requested resource length */
if (len > mem->len) {
dev_err(dev, "Registered carveout doesn't fit len request\n");
- return -ENOMEM;
+ return -EINVAL;
}
if (da != FW_RSC_ADDR_ANY && mem->da == FW_RSC_ADDR_ANY) {
- /* Update existing carveout da */
- mem->da = da;
+ /* Address doesn't match registered carveout configuration */
+ return -EINVAL;
} else if (da != FW_RSC_ADDR_ANY && mem->da != FW_RSC_ADDR_ANY) {
delta = da - mem->da;
@@ -298,13 +309,13 @@ int rproc_check_carveout_da(struct rproc *rproc, struct rproc_mem_entry *mem,
if (delta < 0) {
dev_err(dev,
"Registered carveout doesn't fit da request\n");
- return -ENOMEM;
+ return -EINVAL;
}
if (delta + len > mem->len) {
dev_err(dev,
"Registered carveout doesn't fit len request\n");
- return -ENOMEM;
+ return -EINVAL;
}
}
@@ -418,8 +429,25 @@ static int rproc_vdev_do_start(struct rproc_subdev *subdev)
static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed)
{
struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
+ int ret;
- rproc_remove_virtio_dev(rvdev);
+ ret = device_for_each_child(&rvdev->dev, NULL, rproc_remove_virtio_dev);
+ if (ret)
+ dev_warn(&rvdev->dev, "can't remove vdev child device: %d\n", ret);
+}
+
+/**
+ * rproc_rvdev_release() - release an rvdev
+ *
+ * @dev: the subdevice's dev
+ */
+static void rproc_rvdev_release(struct device *dev)
+{
+ struct rproc_vdev *rvdev = container_of(dev, struct rproc_vdev, dev);
+
+ of_reserved_mem_device_release(dev);
+
+ kfree(rvdev);
}
/**
@@ -455,6 +483,7 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
struct device *dev = &rproc->dev;
struct rproc_vdev *rvdev;
int i, ret;
+ char name[16];
/* make sure resource isn't truncated */
if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring)
@@ -488,6 +517,29 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
rvdev->rproc = rproc;
rvdev->index = rproc->nb_vdev++;
+ /* Initialise vdev subdevice */
+ snprintf(name, sizeof(name), "vdev%dbuffer", rvdev->index);
+ rvdev->dev.parent = rproc->dev.parent;
+ rvdev->dev.release = rproc_rvdev_release;
+ dev_set_name(&rvdev->dev, "%s#%s", dev_name(rvdev->dev.parent), name);
+ dev_set_drvdata(&rvdev->dev, rvdev);
+
+ ret = device_register(&rvdev->dev);
+ if (ret) {
+ put_device(&rvdev->dev);
+ return ret;
+ }
+ /* Make device dma capable by inheriting from parent's capabilities */
+ set_dma_ops(&rvdev->dev, get_dma_ops(rproc->dev.parent));
+
+ ret = dma_coerce_mask_and_coherent(&rvdev->dev,
+ dma_get_mask(rproc->dev.parent));
+ if (ret) {
+ dev_warn(dev,
+ "Failed to set DMA mask %llx. Trying to continue... %x\n",
+ dma_get_mask(rproc->dev.parent), ret);
+ }
+
/* parse the vrings */
for (i = 0; i < rsc->num_of_vrings; i++) {
ret = rproc_parse_vring(rvdev, rsc, i);
@@ -518,7 +570,7 @@ unwind_vring_allocations:
for (i--; i >= 0; i--)
rproc_free_vring(&rvdev->vring[i]);
free_rvdev:
- kfree(rvdev);
+ device_unregister(&rvdev->dev);
return ret;
}
@@ -536,7 +588,7 @@ void rproc_vdev_release(struct kref *ref)
rproc_remove_subdev(rproc, &rvdev->subdev);
list_del(&rvdev->node);
- kfree(rvdev);
+ device_unregister(&rvdev->dev);
}
/**
@@ -558,9 +610,8 @@ void rproc_vdev_release(struct kref *ref)
static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
int offset, int avail)
{
- struct rproc_mem_entry *trace;
+ struct rproc_debug_trace *trace;
struct device *dev = &rproc->dev;
- void *ptr;
char name[15];
if (sizeof(*rsc) > avail) {
@@ -574,28 +625,23 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
return -EINVAL;
}
- /* what's the kernel address of this resource ? */
- ptr = rproc_da_to_va(rproc, rsc->da, rsc->len);
- if (!ptr) {
- dev_err(dev, "erroneous trace resource entry\n");
- return -EINVAL;
- }
-
trace = kzalloc(sizeof(*trace), GFP_KERNEL);
if (!trace)
return -ENOMEM;
/* set the trace buffer dma properties */
- trace->len = rsc->len;
- trace->va = ptr;
+ trace->trace_mem.len = rsc->len;
+ trace->trace_mem.da = rsc->da;
+
+ /* store a pointer to the rproc */
+ trace->rproc = rproc;
/* make sure snprintf always null terminates, even if truncating */
snprintf(name, sizeof(name), "trace%d", rproc->num_traces);
/* create the debugfs entry */
- trace->priv = rproc_create_trace_file(name, rproc, trace);
- if (!trace->priv) {
- trace->va = NULL;
+ trace->tfile = rproc_create_trace_file(name, rproc, trace);
+ if (!trace->tfile) {
kfree(trace);
return -EINVAL;
}
@@ -604,8 +650,8 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
rproc->num_traces++;
- dev_dbg(dev, "%s added: va %pK, da 0x%x, len 0x%x\n",
- name, ptr, rsc->da, rsc->len);
+ dev_dbg(dev, "%s added: da 0x%x, len 0x%x\n",
+ name, rsc->da, rsc->len);
return 0;
}
@@ -715,6 +761,18 @@ static int rproc_alloc_carveout(struct rproc *rproc,
dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%x\n",
va, &dma, mem->len);
+ if (mem->da != FW_RSC_ADDR_ANY && !rproc->domain) {
+ /*
+ * Check that the requested da equals the allocated dma address
+ * and print a warning in case of misalignment.
+ * Don't stop the rproc_start sequence, as the coprocessor may
+ * build its own pa-to-da translation.
+ */
+ if (mem->da != (u32)dma)
+ dev_warn(dev->parent,
+ "Allocated carveout doesn't fit device address request\n");
+ }
+
/*
* Ok, this is non-standard.
*
@@ -732,15 +790,7 @@ static int rproc_alloc_carveout(struct rproc *rproc,
* to use the iommu-based DMA API: we expect 'dma' to contain the
* physical address in this case.
*/
-
- if (mem->da != FW_RSC_ADDR_ANY) {
- if (!rproc->domain) {
- dev_err(dev->parent,
- "Bad carveout rsc configuration\n");
- ret = -ENOMEM;
- goto dma_free;
- }
-
+ if (mem->da != FW_RSC_ADDR_ANY && rproc->domain) {
mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
if (!mapping) {
ret = -ENOMEM;
@@ -767,11 +817,17 @@ static int rproc_alloc_carveout(struct rproc *rproc,
dev_dbg(dev, "carveout mapped 0x%x to %pad\n",
mem->da, &dma);
- } else {
+ }
+
+ if (mem->da == FW_RSC_ADDR_ANY) {
+ /* Update device address as undefined by requester */
+ if ((u64)dma & HIGH_BITS_MASK)
+ dev_warn(dev, "DMA address cast in 32bit to fit resource table format\n");
+
mem->da = (u32)dma;
}
- mem->dma = (u32)dma;
+ mem->dma = dma;
mem->va = va;
return 0;
@@ -900,7 +956,8 @@ EXPORT_SYMBOL(rproc_add_carveout);
* @dma: dma address
* @len: memory carveout length
* @da: device address
- * @release: memory carveout function
+ * @alloc: memory carveout allocation function
+ * @release: memory carveout release function
* @name: carveout name
*
* This function allocates a rproc_mem_entry struct and fill it with parameters
@@ -1110,6 +1167,7 @@ static int rproc_alloc_registered_carveouts(struct rproc *rproc)
struct rproc_mem_entry *entry, *tmp;
struct fw_rsc_carveout *rsc;
struct device *dev = &rproc->dev;
+ u64 pa;
int ret;
list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
@@ -1146,10 +1204,15 @@ static int rproc_alloc_registered_carveouts(struct rproc *rproc)
/* Use va if defined else dma to generate pa */
if (entry->va)
- rsc->pa = (u32)rproc_va_to_pa(entry->va);
+ pa = (u64)rproc_va_to_pa(entry->va);
else
- rsc->pa = (u32)entry->dma;
+ pa = (u64)entry->dma;
+
+ if (((u64)pa) & HIGH_BITS_MASK)
+ dev_warn(dev,
+ "Physical address cast in 32bit to fit resource table format\n");
+ rsc->pa = (u32)pa;
rsc->da = entry->da;
rsc->len = entry->len;
}
@@ -1182,15 +1245,16 @@ static void rproc_coredump_cleanup(struct rproc *rproc)
static void rproc_resource_cleanup(struct rproc *rproc)
{
struct rproc_mem_entry *entry, *tmp;
+ struct rproc_debug_trace *trace, *ttmp;
struct rproc_vdev *rvdev, *rvtmp;
struct device *dev = &rproc->dev;
/* clean up debugfs trace entries */
- list_for_each_entry_safe(entry, tmp, &rproc->traces, node) {
- rproc_remove_trace_file(entry->priv);
+ list_for_each_entry_safe(trace, ttmp, &rproc->traces, node) {
+ rproc_remove_trace_file(trace->tfile);
rproc->num_traces--;
- list_del(&entry->node);
- kfree(entry);
+ list_del(&trace->node);
+ kfree(trace);
}
/* clean up iommu mapping entries */
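
A side effect of modelling each vdev as a struct device above is the standard driver-core lifetime rule: once device_register() has been called, the memory backing the device may only go away through its .release callback, so error and teardown paths switch from kfree() to put_device()/device_unregister(). A generic sketch of that rule (hypothetical names, not code from this patch):

#include <linux/device.h>
#include <linux/slab.h>

struct example_child {
	struct device dev;
};

static void example_child_release(struct device *dev)
{
	struct example_child *child = container_of(dev, struct example_child, dev);

	kfree(child);
}

static int example_add_child(struct device *parent)
{
	struct example_child *child;
	int ret;

	child = kzalloc(sizeof(*child), GFP_KERNEL);
	if (!child)
		return -ENOMEM;

	child->dev.parent = parent;
	child->dev.release = example_child_release;
	dev_set_name(&child->dev, "%s#child", dev_name(parent));

	ret = device_register(&child->dev);
	if (ret) {
		/* Registration failed but the refcount is live: drop it. */
		put_device(&child->dev);
		return ret;
	}

	return 0;
}

This is why rproc_handle_vdev() and rproc_vdev_release() above now end in device_unregister() rather than kfree(), and why rproc_rvdev_release() is where the rvdev memory (and its reserved-memory association) is finally dropped.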
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index e90135c64af0..6da934b8dc4b 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -47,10 +47,23 @@ static struct dentry *rproc_dbg;
static ssize_t rproc_trace_read(struct file *filp, char __user *userbuf,
size_t count, loff_t *ppos)
{
- struct rproc_mem_entry *trace = filp->private_data;
- int len = strnlen(trace->va, trace->len);
+ struct rproc_debug_trace *data = filp->private_data;
+ struct rproc_mem_entry *trace = &data->trace_mem;
+ void *va;
+ char buf[100];
+ int len;
+
+ va = rproc_da_to_va(data->rproc, trace->da, trace->len);
- return simple_read_from_buffer(userbuf, count, ppos, trace->va, len);
+ if (!va) {
+ len = scnprintf(buf, sizeof(buf), "Trace %s not available\n",
+ trace->name);
+ va = buf;
+ } else {
+ len = strnlen(va, trace->len);
+ }
+
+ return simple_read_from_buffer(userbuf, count, ppos, va, len);
}
static const struct file_operations trace_rproc_ops = {
@@ -155,6 +168,30 @@ static const struct file_operations rproc_recovery_ops = {
.llseek = generic_file_llseek,
};
+/* expose the crash trigger via debugfs */
+static ssize_t
+rproc_crash_write(struct file *filp, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct rproc *rproc = filp->private_data;
+ unsigned int type;
+ int ret;
+
+ ret = kstrtouint_from_user(user_buf, count, 0, &type);
+ if (ret < 0)
+ return ret;
+
+ rproc_report_crash(rproc, type);
+
+ return count;
+}
+
+static const struct file_operations rproc_crash_ops = {
+ .write = rproc_crash_write,
+ .open = simple_open,
+ .llseek = generic_file_llseek,
+};
+
/* Expose resource table content via debugfs */
static int rproc_rsc_table_show(struct seq_file *seq, void *p)
{
@@ -288,7 +325,7 @@ void rproc_remove_trace_file(struct dentry *tfile)
}
struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc,
- struct rproc_mem_entry *trace)
+ struct rproc_debug_trace *trace)
{
struct dentry *tfile;
@@ -325,6 +362,8 @@ void rproc_create_debug_dir(struct rproc *rproc)
rproc, &rproc_name_ops);
debugfs_create_file("recovery", 0400, rproc->dbg_dir,
rproc, &rproc_recovery_ops);
+ debugfs_create_file("crash", 0200, rproc->dbg_dir,
+ rproc, &rproc_crash_ops);
debugfs_create_file("resource_table", 0400, rproc->dbg_dir,
rproc, &rproc_rsc_table_ops);
debugfs_create_file("carveout_memories", 0400, rproc->dbg_dir,
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index f6cad243d7ca..45ff76a06c72 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -25,6 +25,13 @@
struct rproc;
+struct rproc_debug_trace {
+ struct rproc *rproc;
+ struct dentry *tfile;
+ struct list_head node;
+ struct rproc_mem_entry trace_mem;
+};
+
/* from remoteproc_core.c */
void rproc_release(struct kref *kref);
irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int vq_id);
@@ -32,12 +39,12 @@ void rproc_vdev_release(struct kref *ref);
/* from remoteproc_virtio.c */
int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id);
-void rproc_remove_virtio_dev(struct rproc_vdev *rvdev);
+int rproc_remove_virtio_dev(struct device *dev, void *data);
/* from remoteproc_debugfs.c */
void rproc_remove_trace_file(struct dentry *tfile);
struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc,
- struct rproc_mem_entry *trace);
+ struct rproc_debug_trace *trace);
void rproc_delete_debug_dir(struct rproc *rproc);
void rproc_create_debug_dir(struct rproc *rproc);
void rproc_init_debugfs(void);
@@ -52,6 +59,7 @@ void rproc_free_vring(struct rproc_vring *rvring);
int rproc_alloc_vring(struct rproc_vdev *rvdev, int i);
void *rproc_da_to_va(struct rproc *rproc, u64 da, int len);
+phys_addr_t rproc_va_to_pa(void *cpu_addr);
int rproc_trigger_recovery(struct rproc *rproc);
int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw);
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 2d7cd344f3bf..44774de6f17b 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -17,7 +17,9 @@
* GNU General Public License for more details.
*/
+#include <linux/dma-mapping.h>
#include <linux/export.h>
+#include <linux/of_reserved_mem.h>
#include <linux/remoteproc.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
@@ -316,6 +318,8 @@ static void rproc_virtio_dev_release(struct device *dev)
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
struct rproc *rproc = vdev_to_rproc(vdev);
+ kfree(vdev);
+
kref_put(&rvdev->refcount, rproc_vdev_release);
put_device(&rproc->dev);
@@ -333,10 +337,53 @@ static void rproc_virtio_dev_release(struct device *dev)
int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
{
struct rproc *rproc = rvdev->rproc;
- struct device *dev = &rproc->dev;
- struct virtio_device *vdev = &rvdev->vdev;
+ struct device *dev = &rvdev->dev;
+ struct virtio_device *vdev;
+ struct rproc_mem_entry *mem;
int ret;
+ /* Try to find dedicated vdev buffer carveout */
+ mem = rproc_find_carveout_by_name(rproc, "vdev%dbuffer", rvdev->index);
+ if (mem) {
+ phys_addr_t pa;
+
+ if (mem->of_resm_idx != -1) {
+ struct device_node *np = rproc->dev.parent->of_node;
+
+ /* Associate the reserved memory region with the vdev device */
+ ret = of_reserved_mem_device_init_by_idx(dev, np,
+ mem->of_resm_idx);
+ if (ret) {
+ dev_err(dev, "Can't associate reserved memory\n");
+ goto out;
+ }
+ } else {
+ if (mem->va) {
+ dev_warn(dev, "vdev %d buffer already mapped\n",
+ rvdev->index);
+ pa = rproc_va_to_pa(mem->va);
+ } else {
+ /* Use the dma address, as the carveout is not mapped yet */
+ pa = (phys_addr_t)mem->dma;
+ }
+
+ /* Associate the vdev buffer memory pool with the vdev subdevice */
+ ret = dma_declare_coherent_memory(dev, pa,
+ mem->da,
+ mem->len);
+ if (ret < 0) {
+ dev_err(dev, "Failed to associate buffer\n");
+ goto out;
+ }
+ }
+ }
+
+ /* Allocate virtio device */
+ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+ if (!vdev) {
+ ret = -ENOMEM;
+ goto out;
+ }
vdev->id.device = id,
vdev->config = &rproc_virtio_config_ops,
vdev->dev.parent = dev;
@@ -370,11 +417,15 @@ out:
/**
* rproc_remove_virtio_dev() - remove an rproc-induced virtio device
- * @rvdev: the remote vdev
+ * @dev: the virtio device
+ * @data: must be null
*
* This function unregisters an existing virtio device.
*/
-void rproc_remove_virtio_dev(struct rproc_vdev *rvdev)
+int rproc_remove_virtio_dev(struct device *dev, void *data)
{
- unregister_virtio_device(&rvdev->vdev);
+ struct virtio_device *vdev = dev_to_virtio(dev);
+
+ unregister_virtio_device(vdev);
+ return 0;
}
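
Once the vdev buffer carveout is tied to the per-vdev device, either through of_reserved_mem_device_init_by_idx() or through the dma_declare_coherent_memory() call above, coherent allocations made on that device are served from the dedicated pool. A compressed sketch of the association step and the allocation that later relies on it (device and size names are placeholders):

#include <linux/dma-mapping.h>

static int example_setup_vdev_pool(struct device *vdev_dev, phys_addr_t pa,
				   dma_addr_t da, size_t len)
{
	/* Dedicate the carveout to this device's coherent allocations. */
	return dma_declare_coherent_memory(vdev_dev, pa, da, len);
}

static void *example_alloc_vdev_bufs(struct device *vdev_dev, size_t size,
				     dma_addr_t *dma)
{
	/* Served from the declared pool when one exists. */
	return dma_alloc_coherent(vdev_dev, size, dma, GFP_KERNEL);
}

This is also why virtio_rpmsg_bus.c below allocates its buffers from vdev->dev.parent instead of vdev->dev.parent->parent: the virtio device's parent is now the rvdev device that owns the declared pool, rather than the remoteproc platform device.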
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
index aacef0ea3b90..51049d17b1e5 100644
--- a/drivers/remoteproc/st_remoteproc.c
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -19,6 +19,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
@@ -91,6 +92,77 @@ static void st_rproc_kick(struct rproc *rproc, int vqid)
dev_err(dev, "failed to send message via mbox: %d\n", ret);
}
+static int st_rproc_mem_alloc(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ struct device *dev = rproc->dev.parent;
+ void *va;
+
+ va = ioremap_wc(mem->dma, mem->len);
+ if (!va) {
+ dev_err(dev, "Unable to map memory region: %pa+%zx\n",
+ &mem->dma, mem->len);
+ return -ENOMEM;
+ }
+
+ /* Update memory entry va */
+ mem->va = va;
+
+ return 0;
+}
+
+static int st_rproc_mem_release(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ iounmap(mem->va);
+
+ return 0;
+}
+
+static int st_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ struct device *dev = rproc->dev.parent;
+ struct device_node *np = dev->of_node;
+ struct rproc_mem_entry *mem;
+ struct reserved_mem *rmem;
+ struct of_phandle_iterator it;
+ int index = 0;
+
+ of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
+ while (of_phandle_iterator_next(&it) == 0) {
+ rmem = of_reserved_mem_lookup(it.node);
+ if (!rmem) {
+ dev_err(dev, "unable to acquire memory-region\n");
+ return -EINVAL;
+ }
+
+ /* No need to map vdev buffer */
+ if (strcmp(it.node->name, "vdev0buffer")) {
+ /* Register memory region */
+ mem = rproc_mem_entry_init(dev, NULL,
+ (dma_addr_t)rmem->base,
+ rmem->size, rmem->base,
+ st_rproc_mem_alloc,
+ st_rproc_mem_release,
+ it.node->name);
+ } else {
+ /* Register reserved memory for vdev buffer allocation */
+ mem = rproc_of_resm_mem_entry_init(dev, index,
+ rmem->size,
+ rmem->base,
+ it.node->name);
+ }
+
+ if (!mem)
+ return -ENOMEM;
+
+ rproc_add_carveout(rproc, mem);
+ index++;
+ }
+
+ return rproc_elf_load_rsc_table(rproc, fw);
+}
+
static int st_rproc_start(struct rproc *rproc)
{
struct st_rproc *ddata = rproc->priv;
@@ -158,9 +230,14 @@ static int st_rproc_stop(struct rproc *rproc)
}
static const struct rproc_ops st_rproc_ops = {
- .kick = st_rproc_kick,
- .start = st_rproc_start,
- .stop = st_rproc_stop,
+ .kick = st_rproc_kick,
+ .start = st_rproc_start,
+ .stop = st_rproc_stop,
+ .parse_fw = st_rproc_parse_fw,
+ .load = rproc_elf_load_segments,
+ .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
+ .sanity_check = rproc_elf_sanity_check,
+ .get_boot_addr = rproc_elf_get_boot_addr,
};
/*
@@ -254,12 +331,6 @@ static int st_rproc_parse_dt(struct platform_device *pdev)
return -EINVAL;
}
- err = of_reserved_mem_device_init(dev);
- if (err) {
- dev_err(dev, "Failed to obtain shared memory\n");
- return err;
- }
-
err = clk_prepare(ddata->clk);
if (err)
dev_err(dev, "failed to get clock\n");
@@ -387,8 +458,6 @@ static int st_rproc_remove(struct platform_device *pdev)
clk_disable_unprepare(ddata->clk);
- of_reserved_mem_device_release(&pdev->dev);
-
for (i = 0; i < ST_RPROC_MAX_VRING * MBOX_MAX; i++)
mbox_free_channel(ddata->mbox_chan[i]);
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 664f957012cd..5d3685bd76a2 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -11,21 +11,21 @@
#define pr_fmt(fmt) "%s: " fmt, __func__
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/virtio.h>
-#include <linux/virtio_ids.h>
-#include <linux/virtio_config.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/rpmsg.h>
#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
#include <linux/slab.h>
-#include <linux/idr.h>
-#include <linux/jiffies.h>
#include <linux/sched.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
#include <linux/wait.h>
-#include <linux/rpmsg.h>
-#include <linux/mutex.h>
-#include <linux/of_device.h>
#include "rpmsg_internal.h"
@@ -912,7 +912,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
total_buf_space = vrp->num_bufs * vrp->buf_size;
/* allocate coherent memory for the buffers */
- bufs_va = dma_alloc_coherent(vdev->dev.parent->parent,
+ bufs_va = dma_alloc_coherent(vdev->dev.parent,
total_buf_space, &vrp->bufs_dma,
GFP_KERNEL);
if (!bufs_va) {
@@ -980,7 +980,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
return 0;
free_coherent:
- dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
+ dma_free_coherent(vdev->dev.parent, total_buf_space,
bufs_va, vrp->bufs_dma);
vqs_del:
vdev->config->del_vqs(vrp->vdev);
@@ -1015,7 +1015,7 @@ static void rpmsg_remove(struct virtio_device *vdev)
vdev->config->del_vqs(vrp->vdev);
- dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
+ dma_free_coherent(vdev->dev.parent, total_buf_space,
vrp->rbufs, vrp->bufs_dma);
kfree(vrp);
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index 98dbc796353f..53ca9ba6ab4b 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -153,7 +153,6 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
#ifdef CONFIG_SERIAL_8250_DMA
static const struct dw_dma_platform_data qrk_serial_dma_pdata = {
.nr_channels = 2,
- .is_private = true,
.chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
.chan_priority = CHAN_PRIORITY_ASCENDING,
.block_size = 4095,